Merge "Add tests and fixes for BidirectionalSequenceLSTM op's aux_input." into qt-dev
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index 64cea58..3338493 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -339,7 +339,10 @@
uint8_t requantize(uint8_t value, const Shape& oldShape, const Shape& newShape) {
double doubleValue = (value - oldShape.offset) * oldShape.scale;
- return static_cast<uint8_t>(doubleValue / newShape.scale + newShape.offset);
+ double doubleRet = doubleValue / newShape.scale + newShape.offset;
+ if (doubleRet < 0) return 0;
+ if (doubleRet > 255) return 255;
+ return static_cast<uint8_t>(doubleRet);
}
bool floorPrepare(const Shape& input, Shape* output) {
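
For reference, a minimal standalone sketch (not part of the patch) of the saturating behavior the new requantize() code implements; the Shape stand-in and the name requantizeSketch are hypothetical:

    #include <cstdint>

    // Stand-in for the two Shape fields that requantize() actually reads.
    struct Shape {
        float scale;
        int32_t offset;
    };

    // Dequantize with the old parameters, requantize with the new ones, and
    // saturate to [0, 255] instead of letting the uint8_t conversion wrap.
    uint8_t requantizeSketch(uint8_t value, const Shape& oldShape, const Shape& newShape) {
        double doubleValue = (value - oldShape.offset) * oldShape.scale;
        double doubleRet = doubleValue / newShape.scale + newShape.offset;
        if (doubleRet < 0) return 0;
        if (doubleRet > 255) return 255;
        return static_cast<uint8_t>(doubleRet);
    }

    // Example: requantizeSketch(0, {0.5f, 128}, {1.0f, 0}) dequantizes 0 to -64.0;
    // the old code wrapped -64 to 192, the new code clamps it to 0.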
diff --git a/nn/common/operations/Cast.cpp b/nn/common/operations/Cast.cpp
index 5ca92c8..f569767 100644
--- a/nn/common/operations/Cast.cpp
+++ b/nn/common/operations/Cast.cpp
@@ -27,7 +27,13 @@
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int numElements) {
- std::transform(in, in + numElements, out, [](FromT a) { return static_cast<ToT>(a); });
+ std::transform(in, in + numElements, out, [](FromT a) -> ToT {
+ if constexpr (std::is_same_v<ToT, uint8_t>) {
+ if (a < 0) return 0;
+ if (a > 255) return 255;
+ }
+ return static_cast<ToT>(a);
+ });
}
template <typename FromT>
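
A usage sketch of the clamp above (copyCastSketch is a hypothetical standalone copy, not the real function): the if constexpr branch compiles the saturation checks only when the destination type is uint8_t, so every other instantiation keeps the plain static_cast.

    #include <algorithm>
    #include <cstdint>
    #include <type_traits>

    template <typename FromT, typename ToT>
    void copyCastSketch(const FromT* in, ToT* out, int numElements) {
        std::transform(in, in + numElements, out, [](FromT a) -> ToT {
            if constexpr (std::is_same_v<ToT, uint8_t>) {
                if (a < 0) return 0;    // saturate instead of wrapping
                if (a > 255) return 255;
            }
            return static_cast<ToT>(a);
        });
    }

    // float -> uint8_t saturates: {-1.0f, 256.0f} becomes {0, 255}, which is what the
    // cast_*_to_quant8_overflow tests added below exercise.
    // float -> int32_t is unaffected and still converts via plain static_cast.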
diff --git a/nn/common/operations/Comparisons.cpp b/nn/common/operations/Comparisons.cpp
index 1820dde..aca5882 100644
--- a/nn/common/operations/Comparisons.cpp
+++ b/nn/common/operations/Comparisons.cpp
@@ -120,7 +120,9 @@
} // namespace
-bool validate(const IOperationValidationContext* context) {
+// EQUAL and NOT_EQUAL ops support TENSOR_BOOL8 in addition to all the other
+// types supported by comparison ops.
+bool validateEqualAndNotEqual(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
OperandType inputType = context->getInputType(kInputTensor1);
@@ -134,6 +136,19 @@
return validateHalVersion(context, HalVersion::V1_2);
}
+bool validateComparisons(const IOperationValidationContext* context) {
+ NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+ NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+ OperandType inputType = context->getInputType(kInputTensor1);
+ NN_RET_CHECK(
+ inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 ||
+ inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ << "Unsupported input operand type for comparison op: " << toString(inputType);
+ NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));
+ NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));
+ return validateHalVersion(context, HalVersion::V1_2);
+}
+
bool prepare(IOperationExecutionContext* context) {
Shape input1 = context->getInputShape(kInputTensor1);
Shape input2 = context->getInputShape(kInputTensor2);
@@ -152,8 +167,6 @@
return executeLessTyped<int32_t, int32_t>(context);
case OperandType::TENSOR_QUANT8_ASYMM:
return executeLessTyped<uint8_t, float>(context);
- case OperandType::TENSOR_BOOL8:
- return executeLessTyped<bool8, bool8>(context);
default:
NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
}
@@ -169,8 +182,6 @@
return executeLessEqualTyped<int32_t, int32_t>(context);
case OperandType::TENSOR_QUANT8_ASYMM:
return executeLessEqualTyped<uint8_t, float>(context);
- case OperandType::TENSOR_BOOL8:
- return executeLessEqualTyped<bool8, bool8>(context);
default:
NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
}
@@ -220,8 +231,6 @@
return executeGreaterEqualTyped<int32_t, int32_t>(context);
case OperandType::TENSOR_QUANT8_ASYMM:
return executeGreaterEqualTyped<uint8_t, float>(context);
- case OperandType::TENSOR_BOOL8:
- return executeGreaterEqualTyped<bool8, bool8>(context);
default:
NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
}
@@ -237,8 +246,6 @@
return executeGreaterTyped<int32_t, int32_t>(context);
case OperandType::TENSOR_QUANT8_ASYMM:
return executeGreaterTyped<uint8_t, float>(context);
- case OperandType::TENSOR_BOOL8:
- return executeGreaterTyped<bool8, bool8>(context);
default:
NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
}
@@ -246,17 +253,17 @@
} // namespace comparisons
-NN_REGISTER_OPERATION(LESS, "LESS", comparisons::validate, comparisons::prepare,
+NN_REGISTER_OPERATION(LESS, "LESS", comparisons::validateComparisons, comparisons::prepare,
comparisons::executeLess);
-NN_REGISTER_OPERATION(LESS_EQUAL, "LESS_EQUAL", comparisons::validate, comparisons::prepare,
- comparisons::executeLessEqual);
-NN_REGISTER_OPERATION(EQUAL, "EQUAL", comparisons::validate, comparisons::prepare,
+NN_REGISTER_OPERATION(LESS_EQUAL, "LESS_EQUAL", comparisons::validateComparisons,
+ comparisons::prepare, comparisons::executeLessEqual);
+NN_REGISTER_OPERATION(EQUAL, "EQUAL", comparisons::validateEqualAndNotEqual, comparisons::prepare,
comparisons::executeEqual);
-NN_REGISTER_OPERATION(NOT_EQUAL, "NOT_EQUAL", comparisons::validate, comparisons::prepare,
- comparisons::executeNotEqual);
-NN_REGISTER_OPERATION(GREATER_EQUAL, "GREATER_EQUAL", comparisons::validate, comparisons::prepare,
- comparisons::executeGreaterEqual);
-NN_REGISTER_OPERATION(GREATER, "GREATER", comparisons::validate, comparisons::prepare,
+NN_REGISTER_OPERATION(NOT_EQUAL, "NOT_EQUAL", comparisons::validateEqualAndNotEqual,
+ comparisons::prepare, comparisons::executeNotEqual);
+NN_REGISTER_OPERATION(GREATER_EQUAL, "GREATER_EQUAL", comparisons::validateComparisons,
+ comparisons::prepare, comparisons::executeGreaterEqual);
+NN_REGISTER_OPERATION(GREATER, "GREATER", comparisons::validateComparisons, comparisons::prepare,
comparisons::executeGreater);
} // namespace nn
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 8a57d03..9fdc648 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -3109,7 +3109,6 @@
* For input tensors x and y, computes x > y elementwise.
*
* Supported tensor {@link OperandCode}:
- * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_INT32}
@@ -3134,7 +3133,6 @@
* For input tensors x and y, computes x >= y elementwise.
*
* Supported tensor {@link OperandCode}:
- * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_INT32}
@@ -3394,7 +3392,6 @@
* For input tensors x and y, computes x < y elementwise.
*
* Supported tensor {@link OperandCode}:
- * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_INT32}
@@ -3420,7 +3417,6 @@
* For input tensors x and y, computes x <= y elementwise.
*
* Supported tensor {@link OperandCode}:
- * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_INT32}
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
index 4452eb9..1a610a1 100644
--- a/nn/runtime/test/TestValidateOperations.cpp
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -2704,7 +2704,6 @@
}
TEST(OperationValidationTest, LESS) {
- comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_BOOL8);
comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT16);
comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT32);
comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_INT32);
@@ -2712,7 +2711,6 @@
}
TEST(OperationValidationTest, LESS_EQUAL) {
- comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
@@ -2736,7 +2734,6 @@
}
TEST(OperationValidationTest, GREATER) {
- comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_BOOL8);
comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT16);
comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT32);
comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_INT32);
@@ -2744,7 +2741,6 @@
}
TEST(OperationValidationTest, GREATER_EQUAL) {
- comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index c1f7769..84d52f6 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -247,17 +247,126 @@
model->identifyInputsAndOutputs(modelInputs, modelOutputs);
}
-void RandomGraph::createRequest(test_wrapper::Execution* execution) {
+void RandomGraph::createRequest(test_wrapper::Execution* execution,
+ std::vector<OperandBuffer>* buffers) {
NN_FUZZER_LOG << "Create Request";
- for (auto& operand : mOperands) {
+ if (buffers != nullptr) buffers->clear();
+ for (const auto& operand : mOperands) {
if (operand->type == RandomOperandType::INPUT) {
EXPECT_EQ(execution->setInput(operand->ioIndex, operand->buffer.data(),
operand->getBufferSize(), nullptr),
Result::NO_ERROR);
} else if (operand->type == RandomOperandType::OUTPUT) {
- EXPECT_EQ(execution->setOutput(operand->ioIndex, operand->buffer.data(),
- operand->getBufferSize(), nullptr),
- Result::NO_ERROR);
+ if (buffers == nullptr) {
+ EXPECT_EQ(execution->setOutput(operand->ioIndex, operand->buffer.data(),
+ operand->getBufferSize(), nullptr),
+ Result::NO_ERROR);
+ } else {
+ // The order of the output buffers corresponds to the order in mOperands.
+ buffers->emplace_back(operand->buffer.size());
+ EXPECT_EQ(execution->setOutput(operand->ioIndex, buffers->back().data(),
+ operand->getBufferSize(), nullptr),
+ Result::NO_ERROR);
+ }
+ }
+ }
+}
+
+// Check if the actual results meet the accuracy criterion.
+constexpr uint32_t kMaxNumberOfPrintedErrors = 5;
+template <typename T>
+void expectNear(const RandomOperand& op, const OperandBuffer& test,
+ const AccuracyCriterion& criterion) {
+ const T* actualBuffer = reinterpret_cast<const T*>(test.data());
+ const T* expectedBuffer = reinterpret_cast<const T*>(op.buffer.data());
+ uint32_t len = op.getNumberOfElements();
+ uint32_t numSkip = 0, numErrors = 0;
+    double bias = 0.0, mse = 0.0;
+ for (uint32_t i = 0; i < len; i++) {
+ SCOPED_TRACE(testing::Message() << "When comparing element " << i);
+
+ // Compare all data types in double for precision and signed arithmetic.
+ double actual = static_cast<double>(actualBuffer[i]);
+ double expected = static_cast<double>(expectedBuffer[i]);
+ double tolerableRange = criterion.atol + criterion.rtol * std::fabs(expected);
+
+        // Skip NaN or Inf in either result, as well as reference values of large magnitude
+        // (above 1e3), which are excluded from the accuracy statistics.
+ if (std::isnan(expected) || std::isinf(expected) || std::isnan(actual) ||
+ std::isinf(actual) || std::fabs(expected) > 1e3) {
+ numSkip++;
+ continue;
+ }
+
+ // Accumulate bias and MSE.
+ double diff = actual - expected;
+ bias += diff;
+ mse += diff * diff;
+
+ // Print at most kMaxNumberOfPrintedErrors errors by EXPECT_NEAR.
+ if (numErrors < kMaxNumberOfPrintedErrors) EXPECT_NEAR(expected, actual, tolerableRange);
+ if (!(std::fabs(diff) <= tolerableRange)) numErrors++;
+ }
+ EXPECT_EQ(numErrors, 0u);
+
+ // Test bias and MSE.
+ if (len == numSkip) return;
+ bias /= static_cast<double>(len - numSkip);
+ mse /= static_cast<double>(len - numSkip);
+    EXPECT_LE(std::fabs(bias), criterion.bias);
+ EXPECT_LE(mse, criterion.mse);
+}
+
+void expectBooleanEqual(const RandomOperand& op, const OperandBuffer& test) {
+ const bool8* actual = reinterpret_cast<const bool8*>(test.data());
+ const bool8* expected = reinterpret_cast<const bool8*>(op.buffer.data());
+ uint32_t len = op.getNumberOfElements();
+ uint32_t numErrors = 0;
+ for (uint32_t i = 0; i < len; i++) {
+ SCOPED_TRACE(testing::Message() << "When comparing element " << i);
+ if (numErrors < kMaxNumberOfPrintedErrors) EXPECT_EQ(expected[i], actual[i]);
+ if (expected[i] != actual[i]) numErrors++;
+ }
+ EXPECT_EQ(numErrors, 0u);
+}
+
+void RandomGraph::checkResults(const std::vector<OperandBuffer>& buffers,
+ const AccuracyCriteria& criteria) const {
+ NN_FUZZER_LOG << "Check Results";
+    // Iterate in the same order in which the output buffers were created in createRequest.
+ int i = 0;
+ for (const auto& op : mOperands) {
+ if (op->type == RandomOperandType::OUTPUT) {
+ SCOPED_TRACE(testing::Message() << "When comparing output " << op->ioIndex
+ << " of type " << toString(op->dataType));
+ switch (op->dataType) {
+ case Type::TENSOR_FLOAT32:
+ expectNear<float>(*op, buffers[i], criteria.float32);
+ break;
+ case Type::TENSOR_FLOAT16:
+ expectNear<_Float16>(*op, buffers[i], criteria.float16);
+ break;
+ case Type::TENSOR_INT32:
+ expectNear<int32_t>(*op, buffers[i], criteria.int32);
+ break;
+ case Type::TENSOR_QUANT8_ASYMM:
+ expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm);
+ break;
+ case Type::TENSOR_QUANT8_SYMM:
+ expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm);
+ break;
+ case Type::TENSOR_QUANT16_ASYMM:
+ expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm);
+ break;
+ case Type::TENSOR_QUANT16_SYMM:
+ expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm);
+ break;
+ case Type::TENSOR_BOOL8:
+ expectBooleanEqual(*op, buffers[i]);
+ break;
+ default:
+ NN_FUZZER_CHECK(false) << "Data type not supported.";
+ }
+ i++;
}
}
}
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
index dc0f286..5599f08 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
@@ -100,6 +100,35 @@
RandomOperation(const OperationSignature& operation);
};
+// TODO: Consider relative bias and mse on floating point data types?
+struct AccuracyCriterion {
+ // We expect the driver results to be unbiased.
+    // Formula: abs(sum_{i}(actual_i - expected_i)) / n <= bias
+ float bias = std::numeric_limits<float>::max();
+
+ // Set the threshold on Mean Square Error (MSE).
+    // Formula: sum_{i}((actual_i - expected_i) ^ 2) / n <= mse
+ float mse = std::numeric_limits<float>::max();
+
+    // We also set per-element accuracy thresholds to catch particular edge cases that may be
+    // masked by the aggregate bias or MSE checks. This takes a similar approach to our CTS unit
+    // tests, but with a much more relaxed criterion.
+ // Formula: abs(actual - expected) <= atol + rtol * abs(expected)
+ // where atol stands for Absolute TOLerance and rtol for Relative TOLerance.
+ float atol = 0.0f;
+ float rtol = 0.0f;
+};
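
Restating the three checks above in math form, with n the number of non-skipped elements, a_i the actual and e_i the expected value:

    \left| \frac{1}{n} \sum_{i=1}^{n} (a_i - e_i) \right| \le \mathrm{bias}, \qquad
    \frac{1}{n} \sum_{i=1}^{n} (a_i - e_i)^2 \le \mathrm{mse}, \qquad
    |a_i - e_i| \le \mathrm{atol} + \mathrm{rtol} \cdot |e_i| \quad \forall i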
+
+struct AccuracyCriteria {
+ AccuracyCriterion float32;
+ AccuracyCriterion float16;
+ AccuracyCriterion int32;
+ AccuracyCriterion quant8Asymm;
+ AccuracyCriterion quant8Symm;
+ AccuracyCriterion quant16Asymm;
+ AccuracyCriterion quant16Symm;
+};
+
// The main interface of the random graph generator.
class RandomGraph {
public:
@@ -111,8 +140,15 @@
// Create a NDK model from the random graph.
void createModel(test_wrapper::Model* model);
- // Set the input/output buffers.
- void createRequest(test_wrapper::Execution* execution);
+    // Set the input/output buffers on an NDK execution object. Input buffers always come from
+    // RandomOperand::buffer. Output buffers come from the "buffers" argument when it is non-null;
+    // pass nullptr to write outputs into RandomOperand::buffer instead, e.g. when recording the
+    // reference result.
+ void createRequest(test_wrapper::Execution* execution,
+ std::vector<OperandBuffer>* buffers = nullptr);
+
+ // Check if the results in buffers meet the given accuracy criteria.
+ void checkResults(const std::vector<OperandBuffer>& buffers,
+ const AccuracyCriteria& criteria) const;
    // Dump the generated random graph to a spec file for debugging and visualization purposes.
void dumpSpecFile(std::string filename, std::string testname);
diff --git a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 50456de..7799806 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -413,7 +413,13 @@
return dis(RandomNumberGenerator::generator);
}
+// std::is_floating_point_v<_Float16> evaluates to true in the CTS build target but false in
+// NeuralNetworksTest_static, so outside of CTS we must list _Float16 explicitly in NN_IS_FLOAT
+// for getUniform<_Float16> to get the floating-point behavior.
+#ifdef NNTEST_CTS
+#define NN_IS_FLOAT(T) std::is_floating_point_v<T>
+#else
#define NN_IS_FLOAT(T) std::is_floating_point_v<T> || std::is_same_v<T, _Float16>
+#endif
// getUniform for floating point values operates on a open interval (lower, upper).
// This is important for generating a scale that is greater than but not equal to a lower bound.
diff --git a/nn/runtime/test/fuzzing/RandomVariable.cpp b/nn/runtime/test/fuzzing/RandomVariable.cpp
index b941559..102342a 100644
--- a/nn/runtime/test/fuzzing/RandomVariable.cpp
+++ b/nn/runtime/test/fuzzing/RandomVariable.cpp
@@ -1044,7 +1044,7 @@
// Initialize EvalInfo of each RandomVariable.
for (auto& var : evalOrder) {
if (context->find(var) == context->end()) context->emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var);
+ NN_FUZZER_LOG << " - " << toString(var, context);
}
// Enforce the product of the dimension values below kMaxValue:
@@ -1092,7 +1092,7 @@
// Initialize EvalInfo of each RandomVariable.
for (auto& var : evalOrder) {
if (context.find(var) == context.end()) context.emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var);
+ NN_FUZZER_LOG << " - " << toString(var, &context);
}
// Dispatch to different algorithm according to search range.
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 0896be7..67ccbec 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -14,7 +14,10 @@
* limitations under the License.
*/
+#ifndef NNTEST_CTS
#include <android-base/properties.h>
+#endif
+
#include <gtest/gtest.h>
#include <algorithm>
@@ -30,6 +33,21 @@
namespace fuzzing_test {
using test_wrapper::Result;
+constexpr char kRefDeviceName[] = "nnapi-reference";
+
+// Manages compilation on one single device.
+class CompilationForDevice : public test_wrapper::Compilation {
+ public:
+ CompilationForDevice() = default;
+ CompilationForDevice(const CompilationForDevice&) = delete;
+ CompilationForDevice& operator=(const CompilationForDevice&) = delete;
+
+ bool initialize(const test_wrapper::Model* model, const ANeuralNetworksDevice* device) {
+ int ret = ANeuralNetworksCompilation_createForDevices(model->getHandle(), &device, 1,
+ &mCompilation);
+ return ret == ANEURALNETWORKS_NO_ERROR;
+ }
+};
// NN API fuzzer logging settings come from the system properties debug.nn.fuzzer.log and
// debug.nn.fuzzer.dumpspec.
@@ -42,15 +60,30 @@
// e.g. for test case TestRandomGraph/RandomGraphTest/Large/0,
// log : /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.log
// spec: /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.mod.py
class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
public:
static void SetUpTestCase() {
+#ifndef NNTEST_CTS
mEnableLog = ::android::base::GetProperty("debug.nn.fuzzer.log", "") == "1";
mDumpSpec = ::android::base::GetProperty("debug.nn.fuzzer.dumpspec", "") == "1";
+#endif
+
+ // Get all the devices and device names.
+ uint32_t numDevices = 0;
+ ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+ for (uint32_t i = 0; i < numDevices; i++) {
+ ANeuralNetworksDevice* device = nullptr;
+ const char* name = nullptr;
+ ASSERT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
+ mDevices.emplace(name, device);
+ }
}
protected:
virtual void SetUp() override {
+ // Initialize logging.
const ::testing::TestInfo* const testInfo =
::testing::UnitTest::GetInstance()->current_test_info();
mTestName = mTestName + testInfo->test_case_name() + "_" + testInfo->name();
@@ -58,34 +91,88 @@
if (mEnableLog) NN_FUZZER_LOG_INIT("/data/local/tmp/" + mTestName + ".log");
}
- virtual void TearDown() override { NN_FUZZER_LOG_CLOSE; }
+ virtual void TearDown() override {
+ if (::testing::Test::HasFailure() || mDumpSpec) {
+ mGraph.dumpSpecFile("/data/local/tmp/" + mTestName + ".mod.py", mTestName);
+ }
+ NN_FUZZER_LOG_CLOSE;
+ }
+
+ // Compile and execute the generated graph on a device selected by name.
+ void compute(const test_wrapper::Model* model, uint32_t numOps, const std::string& name) {
+ SCOPED_TRACE("Device: " + name);
+ ASSERT_TRUE(mDevices.find(name) != mDevices.end());
+ const auto device = mDevices[name];
+ bool isRef = name.compare(kRefDeviceName) == 0;
+
+ // Check if the device fully supports the graph.
+ constexpr int kMaxNumberOperations = 1000;
+ ASSERT_TRUE(numOps <= kMaxNumberOperations);
+ bool supported[kMaxNumberOperations] = {false};
+ ASSERT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(model->getHandle(), &device,
+ 1, supported),
+ ANEURALNETWORKS_NO_ERROR);
+ if (!std::all_of(supported, supported + numOps, [](bool v) { return v; })) {
+ // The reference device should always support all operations.
+ ASSERT_FALSE(isRef);
+ std::cout << "[ ] SKIP: " << name << " does not support the graph.\n";
+ return;
+ }
+
+        // Since this test was introduced in Android Q, we only assert that compilation and
+        // execution succeed if the device has feature level >= Q (API level 29). Pre-Q devices
+        // are allowed to fail with OP_FAILED, but they must not hang or crash.
+ int64_t featureLevel;
+ ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
+ ANEURALNETWORKS_NO_ERROR);
+
+ // Create compilation for device.
+ CompilationForDevice compilation;
+ ASSERT_TRUE(compilation.initialize(model, device));
+ Result compileReturn = compilation.finish();
+ if (featureLevel >= __ANDROID_API_Q__) {
+ ASSERT_EQ(compileReturn, Result::NO_ERROR);
+ } else {
+ ASSERT_TRUE(compileReturn == Result::NO_ERROR || compileReturn == Result::OP_FAILED);
+ }
+
+ // Create request.
+ test_wrapper::Execution execution(&compilation);
+ std::vector<OperandBuffer> outputs;
+ if (isRef) {
+ mGraph.createRequest(&execution);
+ } else {
+ mGraph.createRequest(&execution, &outputs);
+ }
+
+ // Compute result.
+ Result executeReturn = execution.compute();
+ if (featureLevel >= __ANDROID_API_Q__) {
+ ASSERT_EQ(executeReturn, Result::NO_ERROR);
+ if (!isRef) mGraph.checkResults(outputs, mCriteria);
+ } else {
+ ASSERT_TRUE(executeReturn == Result::NO_ERROR || executeReturn == Result::OP_FAILED);
+ }
+ }
// Main test entrance.
void testRandomGraph(uint32_t numOperations, uint32_t dimensionRange) {
// Generate a random graph.
- RandomGraph graph;
- EXPECT_TRUE(graph.generate(kSeed, numOperations, dimensionRange));
+ ASSERT_TRUE(mGraph.generate(kSeed, numOperations, dimensionRange));
// Create a model from the random graph.
test_wrapper::Model model;
- graph.createModel(&model);
- EXPECT_TRUE(model.isValid());
- EXPECT_EQ(model.finish(), Result::NO_ERROR);
-
- // Compile the model.
- test_wrapper::Compilation compilation(&model);
- EXPECT_EQ(compilation.finish(), Result::NO_ERROR);
-
- // Create request.
- test_wrapper::Execution execution(&compilation);
- graph.createRequest(&execution);
+ mGraph.createModel(&model);
+ ASSERT_TRUE(model.isValid());
+ ASSERT_EQ(model.finish(), Result::NO_ERROR);
// Compute reference result.
- EXPECT_EQ(execution.compute(), Result::NO_ERROR);
+ compute(&model, numOperations, kRefDeviceName);
- // Dump spec file.
- if (mDumpSpec) {
- graph.dumpSpecFile("/data/local/tmp/" + mTestName + ".mod.py", mTestName);
+ for (auto& pair : mDevices) {
+ // Skip the nnapi reference device.
+ if (pair.first.compare(kRefDeviceName) == 0) continue;
+ compute(&model, numOperations, pair.first);
}
}
@@ -94,24 +181,69 @@
static bool mEnableLog;
static bool mDumpSpec;
+ static std::map<std::string, ANeuralNetworksDevice*> mDevices;
const uint32_t kSeed = GetParam();
std::string mTestName;
+ RandomGraph mGraph;
+ AccuracyCriteria mCriteria;
};
bool RandomGraphTest::mEnableLog = false;
bool RandomGraphTest::mDumpSpec = false;
+std::map<std::string, ANeuralNetworksDevice*> RandomGraphTest::mDevices;
// Single-op graph with dimensions in range [1, 1000].
class SingleOperationTest : public RandomGraphTest {};
-#define TEST_SINGLE_OPERATION(operation, halVersion) \
+#define TEST_SINGLE_OPERATION(operation, halVersion, criteria) \
TEST_P(SingleOperationTest, operation##_##halVersion) { \
OperationFilter filter = {.opcodes = {ANEURALNETWORKS_##operation}, \
.versions = {HalVersion::halVersion}}; \
OperationManager::get()->applyFilter(filter); \
+ mCriteria = (criteria); \
testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE); \
}
+// TODO: Adjust the accuracy criteria based on testing.
+// We define three sets of accuracy criteria for single-operation tests.
+
+// This is for operations that only copy buffers around without any computation on buffer values.
+// Most of these operations fall into categories of reshape or selection, e.g. RESHAPE, GATHER.
+// Additionally, operations that perform only logical or comparison arithmetic also use these
+// criteria, e.g. EQUAL, ARGMAX, TOPK_V2.
+const AccuracyCriteria kStrictCriteria = {
+ .float32 = {.atol = 1e-6f, .rtol = 1e-6f, .bias = 1e-7f, .mse = 1e-10f},
+ .float16 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 1e-4f, .mse = 1e-8f},
+ .int32 = {.atol = 1},
+ .quant8Asymm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
+ .quant8Symm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
+ .quant16Asymm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
+ .quant16Symm = {.atol = 1, .bias = 0.1f, .mse = 0.1f}};
+
+// This is for operations that perform only a single, simple computation on buffer values, such
+// as addition, multiplication, or requantization. Most of these operations fall into the
+// broadcast or elementwise categories, e.g. ADD, FLOOR.
+const AccuracyCriteria kMediumCriteria = {
+ .float32 = {.atol = 1e-5f, .rtol = 1e-5f, .bias = 1e-6f, .mse = 1e-8f},
+ .float16 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 1e-3f, .mse = 1e-6f},
+ .int32 = {.atol = 1},
+ .quant8Asymm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
+ .quant8Symm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
+ .quant16Asymm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
+ .quant16Symm = {.atol = 2, .bias = 0.5f, .mse = 0.5f}};
+
+// This is for operations that involve sophisticated computations on buffer values, either a single
+// but complex transformation, e.g. LOGISTIC, or multiple transformations with accumulated errors,
+// e.g. CONV_2D, REDUCE_*.
+const AccuracyCriteria kRelaxedCriteria = {
+ .float32 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 2e-5f, .mse = 1e-7f},
+ .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
+ .int32 = {.atol = 1},
+ .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+
/*-- NNAPI 1.0 Operations ---------------------------------------------------*/
// TODO: The following 1.0 operation signatures are currently not defined:
@@ -120,78 +252,78 @@
// - ANEURALNETWORKS_RNN
// - ANEURALNETWORKS_SVDF
-TEST_SINGLE_OPERATION(ADD, V1_0);
-TEST_SINGLE_OPERATION(MUL, V1_0);
-TEST_SINGLE_OPERATION(FLOOR, V1_0);
-TEST_SINGLE_OPERATION(LOGISTIC, V1_0);
-TEST_SINGLE_OPERATION(RELU, V1_0);
-TEST_SINGLE_OPERATION(RELU1, V1_0);
-TEST_SINGLE_OPERATION(RELU6, V1_0);
-TEST_SINGLE_OPERATION(TANH, V1_0);
-TEST_SINGLE_OPERATION(SOFTMAX, V1_0);
-TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_0);
-TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_0);
-TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_0);
-TEST_SINGLE_OPERATION(L2_POOL_2D, V1_0);
-TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_0);
-TEST_SINGLE_OPERATION(CONV_2D, V1_0);
-TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_0);
-TEST_SINGLE_OPERATION(CONCATENATION, V1_0);
-TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_0);
-TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_0);
-TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_0);
-TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_0);
-TEST_SINGLE_OPERATION(HASHTABLE_LOOKUP, V1_0);
-TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_0);
-TEST_SINGLE_OPERATION(RESHAPE, V1_0);
-TEST_SINGLE_OPERATION(DEQUANTIZE, V1_0);
+TEST_SINGLE_OPERATION(ADD, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(MUL, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(FLOOR, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(LOGISTIC, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RELU, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU1, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU6, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(TANH, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SOFTMAX, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(L2_POOL_2D, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(CONV_2D, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(CONCATENATION, V1_0, kMediumCriteria);
+TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_0, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_0, kStrictCriteria);
+TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_0, kStrictCriteria);
+TEST_SINGLE_OPERATION(HASHTABLE_LOOKUP, V1_0, kStrictCriteria);
+TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_0, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RESHAPE, V1_0, kStrictCriteria);
+TEST_SINGLE_OPERATION(DEQUANTIZE, V1_0, kMediumCriteria);
/*-- NNAPI 1.1 Operations ---------------------------------------------------*/
-TEST_SINGLE_OPERATION(SUB, V1_1);
-TEST_SINGLE_OPERATION(DIV, V1_1);
-TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_1);
-TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_1);
-TEST_SINGLE_OPERATION(MEAN, V1_1);
-TEST_SINGLE_OPERATION(PAD, V1_1);
-TEST_SINGLE_OPERATION(TRANSPOSE, V1_1);
-TEST_SINGLE_OPERATION(SQUEEZE, V1_1);
-TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_1);
+TEST_SINGLE_OPERATION(SUB, V1_1, kMediumCriteria);
+TEST_SINGLE_OPERATION(DIV, V1_1, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_1, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_1, kStrictCriteria);
+TEST_SINGLE_OPERATION(MEAN, V1_1, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(PAD, V1_1, kStrictCriteria);
+TEST_SINGLE_OPERATION(TRANSPOSE, V1_1, kStrictCriteria);
+TEST_SINGLE_OPERATION(SQUEEZE, V1_1, kStrictCriteria);
+TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_1, kStrictCriteria);
/*-- NNAPI 1.0 and 1.1 Operations with Extended Behavior in 1.2 -------------*/
-TEST_SINGLE_OPERATION(ADD, V1_2);
-TEST_SINGLE_OPERATION(MUL, V1_2);
-TEST_SINGLE_OPERATION(SUB, V1_2);
-TEST_SINGLE_OPERATION(DIV, V1_2);
-TEST_SINGLE_OPERATION(FLOOR, V1_2);
-TEST_SINGLE_OPERATION(LOGISTIC, V1_2);
-TEST_SINGLE_OPERATION(RELU, V1_2);
-TEST_SINGLE_OPERATION(RELU1, V1_2);
-TEST_SINGLE_OPERATION(RELU6, V1_2);
-TEST_SINGLE_OPERATION(TANH, V1_2);
-TEST_SINGLE_OPERATION(CONCATENATION, V1_2);
-TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2);
-TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2);
-TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2);
-TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2);
-TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2);
-TEST_SINGLE_OPERATION(RESHAPE, V1_2);
-TEST_SINGLE_OPERATION(MEAN, V1_2);
-TEST_SINGLE_OPERATION(PAD, V1_2);
-TEST_SINGLE_OPERATION(TRANSPOSE, V1_2);
-TEST_SINGLE_OPERATION(CONV_2D, V1_2);
-TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_2);
-TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_2);
-TEST_SINGLE_OPERATION(L2_POOL_2D, V1_2);
-TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_2);
-TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_2);
-TEST_SINGLE_OPERATION(SOFTMAX, V1_2);
-TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_2);
-TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2);
-TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2);
-TEST_SINGLE_OPERATION(SQUEEZE, V1_2);
-TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2);
+TEST_SINGLE_OPERATION(ADD, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(MUL, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(SUB, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(DIV, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(FLOOR, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(LOGISTIC, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RELU, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU1, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU6, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(TANH, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(CONCATENATION, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RESHAPE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(MEAN, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(PAD, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(TRANSPOSE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(CONV_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(L2_POOL_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SOFTMAX, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(SQUEEZE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2, kStrictCriteria);
/*-- NNAPI 1.2 Operations ---------------------------------------------------*/
@@ -207,53 +339,71 @@
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN
-TEST_SINGLE_OPERATION(ABS, V1_2);
-TEST_SINGLE_OPERATION(EXP, V1_2);
-TEST_SINGLE_OPERATION(LOG, V1_2);
-TEST_SINGLE_OPERATION(NEG, V1_2);
-TEST_SINGLE_OPERATION(RSQRT, V1_2);
-TEST_SINGLE_OPERATION(SIN, V1_2);
-TEST_SINGLE_OPERATION(SQRT, V1_2);
-TEST_SINGLE_OPERATION(ARGMAX, V1_2);
-TEST_SINGLE_OPERATION(ARGMIN, V1_2);
-TEST_SINGLE_OPERATION(EQUAL, V1_2);
-TEST_SINGLE_OPERATION(GREATER, V1_2);
-TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_2);
-TEST_SINGLE_OPERATION(LESS, V1_2);
-TEST_SINGLE_OPERATION(LESS_EQUAL, V1_2);
-TEST_SINGLE_OPERATION(LOGICAL_AND, V1_2);
-TEST_SINGLE_OPERATION(LOGICAL_NOT, V1_2);
-TEST_SINGLE_OPERATION(LOGICAL_OR, V1_2);
-TEST_SINGLE_OPERATION(NOT_EQUAL, V1_2);
-TEST_SINGLE_OPERATION(MAXIMUM, V1_2);
-TEST_SINGLE_OPERATION(MINIMUM, V1_2);
-TEST_SINGLE_OPERATION(POW, V1_2);
-TEST_SINGLE_OPERATION(PRELU, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_ALL, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_ANY, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_MAX, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_MIN, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_PROD, V1_2);
-TEST_SINGLE_OPERATION(REDUCE_SUM, V1_2);
-TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_2);
-TEST_SINGLE_OPERATION(INSTANCE_NORMALIZATION, V1_2);
-TEST_SINGLE_OPERATION(LOG_SOFTMAX, V1_2);
-TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_2);
-TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_2);
-TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_2);
-TEST_SINGLE_OPERATION(PAD_V2, V1_2);
-TEST_SINGLE_OPERATION(QUANTIZE, V1_2);
-TEST_SINGLE_OPERATION(CAST, V1_2);
-TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_2);
-TEST_SINGLE_OPERATION(TILE, V1_2);
-TEST_SINGLE_OPERATION(GATHER, V1_2);
-TEST_SINGLE_OPERATION(SELECT, V1_2);
-TEST_SINGLE_OPERATION(TOPK_V2, V1_2);
-TEST_SINGLE_OPERATION(SLICE, V1_2);
-TEST_SINGLE_OPERATION(SPLIT, V1_2);
-TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2);
-TEST_SINGLE_OPERATION(ROI_POOLING, V1_2);
-TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2);
+TEST_SINGLE_OPERATION(ABS, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(EXP, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LOG, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(NEG, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(RSQRT, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SIN, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SQRT, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(ARGMAX, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(ARGMIN, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(EQUAL, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(GREATER, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(LESS, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(LESS_EQUAL, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(LOGICAL_AND, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(LOGICAL_NOT, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(LOGICAL_OR, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(NOT_EQUAL, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(MAXIMUM, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(MINIMUM, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(POW, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(PRELU, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(REDUCE_ALL, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_ANY, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_MAX, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_MIN, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_PROD, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_SUM, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(INSTANCE_NORMALIZATION, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LOG_SOFTMAX, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(PAD_V2, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(QUANTIZE, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(CAST, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(TILE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(GATHER, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(SELECT, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(TOPK_V2, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(SLICE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPLIT, V1_2, kMediumCriteria);
+TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(ROI_POOLING, V1_2, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2, kRelaxedCriteria);
+
+const AccuracyCriteria kSmallGraphCriteria = {
+ .float32 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 2e-5f, .mse = 1e-7f},
+ .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
+ .int32 = {.atol = 1},
+ .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
+ .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+
+const AccuracyCriteria kLargeGraphCriteria = {
+ .float32 = {.atol = 1e-1f, .rtol = 1e-1f, .bias = 1e-2f, .mse = 1e-4f},
+ .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 1e-1f, .mse = 5e-2f},
+ .int32 = {.atol = 1},
+ .quant8Asymm = {.atol = 10, .bias = 2, .mse = 2},
+ .quant8Symm = {.atol = 10, .bias = 2, .mse = 2},
+ .quant16Asymm = {.atol = 10, .bias = 2, .mse = 2},
+ .quant16Symm = {.atol = 10, .bias = 2, .mse = 2}};
// Due to the limitation of the random graph generator, graphs generated with mixed-type or
// mixed-rank operations are likely to result in a disconnected network. Thus, we filter the
@@ -268,11 +418,13 @@
TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) { \
OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
OperationManager::get()->applyFilter(filter); \
+ mCriteria = kSmallGraphCriteria; \
testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE); \
} \
TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) { \
OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
OperationManager::get()->applyFilter(filter); \
+ mCriteria = kLargeGraphCriteria; \
testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW); \
}
@@ -300,14 +452,13 @@
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 1);
-constexpr uint32_t kFirstSeed = 0;
-constexpr uint32_t kNumTestCases = 100;
-
-INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest,
- ::testing::Range(kFirstSeed, kFirstSeed + kNumTestCases));
-
-INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest,
- ::testing::Range(kFirstSeed, kFirstSeed + kNumTestCases));
+#ifdef NNTEST_CTS
+INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u));
+INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u));
+#else
+INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 100u));
+INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 100u));
+#endif
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index a64761f..ca65cc4 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -84,8 +84,12 @@
using type = _Float16;
};
-constexpr float kMaxFloat32 = 10;
-constexpr float kMinFloat32 = -10;
+// The buffer value X is chosen uniformly in the range [kMinFloat32, kMaxFloat32]. kMinFloat32
+// and kMaxFloat32 are selected so that:
+// * E[X] = 0, making a sum of values less likely to overflow or underflow;
+// * E[abs(X)] = 1, making a product of values less likely to overflow or underflow.
+constexpr float kMaxFloat32 = 2.0f;
+constexpr float kMinFloat32 = -kMaxFloat32;
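
Why 2.0: for X uniform on (-c, c), E[X] = 0 holds for any c, and

    E[|X|] = \frac{1}{2c} \int_{-c}^{c} |x| \, dx = \frac{c}{2},

so E[|X|] = 1 forces c = 2, hence kMaxFloat32 = 2.0f.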
template <typename T>
inline void uniform(T low, T up, RandomOperand* op) {
@@ -224,10 +228,10 @@
inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op) {
op->dataType = dataType;
if (dataType == Type::TENSOR_QUANT8_ASYMM) {
- op->scale = getUniform<float>(0.1, 10.0);
+ op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = getUniform<int32_t>(0, 255);
} else if (dataType == Type::TENSOR_QUANT8_SYMM) {
- op->scale = getUniform<float>(0.1, 10.0);
+ op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = 0;
} else {
op->scale = 0.0f;
diff --git a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index d55a523..633c380 100644
--- a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -6473,6 +6473,134 @@
#endif
+TEST_F(NeuralnetworksHidlTest, cast_float16_to_quant8_overflow) {
+ generated_tests::Execute(device,
+ cast::createTestModel_17,
+ cast::is_ignored_17,
+ cast::get_examples_float16_to_quant8_overflow());
+}
+
+TEST_F(ValidationTest, cast_float16_to_quant8_overflow) {
+ const Model model = cast::createTestModel_17();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_quant8_overflow());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, cast_float16_to_quant8_overflow_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ cast::createTestModel_dynamic_output_shape_17,
+ cast::is_ignored_dynamic_output_shape_17,
+ cast::get_examples_float16_to_quant8_overflow_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, cast_float16_to_quant8_overflow_dynamic_output_shape) {
+ const Model model = cast::createTestModel_dynamic_output_shape_17();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_quant8_overflow_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
+TEST_F(NeuralnetworksHidlTest, cast_float32_to_quant8_overflow) {
+ generated_tests::Execute(device,
+ cast::createTestModel_18,
+ cast::is_ignored_18,
+ cast::get_examples_float32_to_quant8_overflow());
+}
+
+TEST_F(ValidationTest, cast_float32_to_quant8_overflow) {
+ const Model model = cast::createTestModel_18();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8_overflow());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, cast_float32_to_quant8_overflow_relaxed) {
+ generated_tests::Execute(device,
+ cast::createTestModel_relaxed_8,
+ cast::is_ignored_relaxed_8,
+ cast::get_examples_float32_to_quant8_overflow_relaxed());
+}
+
+TEST_F(ValidationTest, cast_float32_to_quant8_overflow_relaxed) {
+ const Model model = cast::createTestModel_relaxed_8();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8_overflow_relaxed());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, cast_float32_to_quant8_overflow_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ cast::createTestModel_dynamic_output_shape_18,
+ cast::is_ignored_dynamic_output_shape_18,
+ cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, cast_float32_to_quant8_overflow_dynamic_output_shape) {
+ const Model model = cast::createTestModel_dynamic_output_shape_18();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, cast_float32_to_quant8_overflow_dynamic_output_shape_relaxed) {
+ generated_tests::Execute(device,
+ cast::createTestModel_dynamic_output_shape_relaxed_8,
+ cast::is_ignored_dynamic_output_shape_relaxed_8,
+ cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, cast_float32_to_quant8_overflow_dynamic_output_shape_relaxed) {
+ const Model model = cast::createTestModel_dynamic_output_shape_relaxed_8();
+ const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
+TEST_F(NeuralnetworksHidlTest, cast_int32_to_quant8_overflow) {
+ generated_tests::Execute(device,
+ cast::createTestModel_19,
+ cast::is_ignored_19,
+ cast::get_examples_int32_to_quant8_overflow());
+}
+
+TEST_F(ValidationTest, cast_int32_to_quant8_overflow) {
+ const Model model = cast::createTestModel_19();
+ const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_quant8_overflow());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, cast_int32_to_quant8_overflow_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ cast::createTestModel_dynamic_output_shape_19,
+ cast::is_ignored_dynamic_output_shape_19,
+ cast::get_examples_int32_to_quant8_overflow_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, cast_int32_to_quant8_overflow_dynamic_output_shape) {
+ const Model model = cast::createTestModel_dynamic_output_shape_19();
+ const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_quant8_overflow_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: channel_shuffle.mod.py.
namespace channel_shuffle {
// Generated channel_shuffle test
@@ -23761,6 +23889,38 @@
#endif
+TEST_F(NeuralnetworksHidlTest, equal_boolean) {
+ generated_tests::Execute(device,
+ equal::createTestModel_7,
+ equal::is_ignored_7,
+ equal::get_examples_boolean());
+}
+
+TEST_F(ValidationTest, equal_boolean) {
+ const Model model = equal::createTestModel_7();
+ const std::vector<Request> requests = createRequests(equal::get_examples_boolean());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, equal_boolean_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ equal::createTestModel_dynamic_output_shape_7,
+ equal::is_ignored_dynamic_output_shape_7,
+ equal::get_examples_boolean_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, equal_boolean_dynamic_output_shape) {
+ const Model model = equal::createTestModel_dynamic_output_shape_7();
+ const std::vector<Request> requests = createRequests(equal::get_examples_boolean_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: exp.mod.py.
namespace exp {
// Generated exp test
@@ -53177,6 +53337,38 @@
#endif
+TEST_F(NeuralnetworksHidlTest, maximum_overflow) {
+ generated_tests::Execute(device,
+ maximum::createTestModel_3,
+ maximum::is_ignored_3,
+ maximum::get_examples_overflow());
+}
+
+TEST_F(ValidationTest, maximum_overflow) {
+ const Model model = maximum::createTestModel_3();
+ const std::vector<Request> requests = createRequests(maximum::get_examples_overflow());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, maximum_overflow_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ maximum::createTestModel_dynamic_output_shape_3,
+ maximum::is_ignored_dynamic_output_shape_3,
+ maximum::get_examples_overflow_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, maximum_overflow_dynamic_output_shape) {
+ const Model model = maximum::createTestModel_dynamic_output_shape_3();
+ const std::vector<Request> requests = createRequests(maximum::get_examples_overflow_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: mean_float16.mod.py.
namespace mean_float16 {
// Generated mean_float16 test
@@ -53545,6 +53737,38 @@
#endif
+TEST_F(NeuralnetworksHidlTest, minimum_overflow) {
+ generated_tests::Execute(device,
+ minimum::createTestModel_3,
+ minimum::is_ignored_3,
+ minimum::get_examples_overflow());
+}
+
+TEST_F(ValidationTest, minimum_overflow) {
+ const Model model = minimum::createTestModel_3();
+ const std::vector<Request> requests = createRequests(minimum::get_examples_overflow());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, minimum_overflow_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ minimum::createTestModel_dynamic_output_shape_3,
+ minimum::is_ignored_dynamic_output_shape_3,
+ minimum::get_examples_overflow_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, minimum_overflow_dynamic_output_shape) {
+ const Model model = minimum::createTestModel_dynamic_output_shape_3();
+ const std::vector<Request> requests = createRequests(minimum::get_examples_overflow_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: mul_v1_2.mod.py.
namespace mul_v1_2 {
// Generated mul_v1_2 test
@@ -54273,6 +54497,38 @@
#endif
+TEST_F(NeuralnetworksHidlTest, not_equal_boolean) {
+ generated_tests::Execute(device,
+ not_equal::createTestModel_7,
+ not_equal::is_ignored_7,
+ not_equal::get_examples_boolean());
+}
+
+TEST_F(ValidationTest, not_equal_boolean) {
+ const Model model = not_equal::createTestModel_7();
+ const std::vector<Request> requests = createRequests(not_equal::get_examples_boolean());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, not_equal_boolean_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ not_equal::createTestModel_dynamic_output_shape_7,
+ not_equal::is_ignored_dynamic_output_shape_7,
+ not_equal::get_examples_boolean_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, not_equal_boolean_dynamic_output_shape) {
+ const Model model = not_equal::createTestModel_dynamic_output_shape_7();
+ const std::vector<Request> requests = createRequests(not_equal::get_examples_boolean_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: pad_all_dims.mod.py.
namespace pad_all_dims {
// Generated pad_all_dims test
@@ -56185,6 +56441,70 @@
#endif
+TEST_F(NeuralnetworksHidlTest, quantized_lstm_constant_weights) {
+ generated_tests::Execute(device,
+ quantized_lstm::createTestModel_2,
+ quantized_lstm::is_ignored_2,
+ quantized_lstm::get_examples_constant_weights());
+}
+
+TEST_F(ValidationTest, quantized_lstm_constant_weights) {
+ const Model model = quantized_lstm::createTestModel_2();
+ const std::vector<Request> requests = createRequests(quantized_lstm::get_examples_constant_weights());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, quantized_lstm_constant_weights_relaxed) {
+ generated_tests::Execute(device,
+ quantized_lstm::createTestModel_relaxed_2,
+ quantized_lstm::is_ignored_relaxed_2,
+ quantized_lstm::get_examples_constant_weights_relaxed());
+}
+
+TEST_F(ValidationTest, quantized_lstm_constant_weights_relaxed) {
+ const Model model = quantized_lstm::createTestModel_relaxed_2();
+ const std::vector<Request> requests = createRequests(quantized_lstm::get_examples_constant_weights_relaxed());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, quantized_lstm_constant_weights_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ quantized_lstm::createTestModel_dynamic_output_shape_2,
+ quantized_lstm::is_ignored_dynamic_output_shape_2,
+ quantized_lstm::get_examples_constant_weights_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, quantized_lstm_constant_weights_dynamic_output_shape) {
+ const Model model = quantized_lstm::createTestModel_dynamic_output_shape_2();
+ const std::vector<Request> requests = createRequests(quantized_lstm::get_examples_constant_weights_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, quantized_lstm_constant_weights_dynamic_output_shape_relaxed) {
+ generated_tests::Execute(device,
+ quantized_lstm::createTestModel_dynamic_output_shape_relaxed_2,
+ quantized_lstm::is_ignored_dynamic_output_shape_relaxed_2,
+ quantized_lstm::get_examples_constant_weights_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, quantized_lstm_constant_weights_dynamic_output_shape_relaxed) {
+ const Model model = quantized_lstm::createTestModel_dynamic_output_shape_relaxed_2();
+ const std::vector<Request> requests = createRequests(quantized_lstm::get_examples_constant_weights_dynamic_output_shape_relaxed());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: random_multinomial.mod.py.
namespace random_multinomial {
// Generated random_multinomial test
diff --git a/nn/runtime/test/generated/examples/cast.example.cpp b/nn/runtime/test/generated/examples/cast.example.cpp
index 74e454b..c952719 100644
--- a/nn/runtime/test/generated/examples/cast.example.cpp
+++ b/nn/runtime/test/generated/examples/cast.example.cpp
@@ -2622,3 +2622,459 @@
return examples_quant8_to_quant8_dynamic_output_shape;
};
+std::vector<MixedTypedExample>& get_examples_float16_to_quant8_overflow() {
+static std::vector<MixedTypedExample> examples_float16_to_quant8_overflow = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float16_to_quant8_overflow;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_to_quant8_overflow_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_float16_to_quant8_overflow_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float16_to_quant8_overflow_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8_overflow() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8_overflow = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float32_to_quant8_overflow;
+};
+
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8_overflow_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8_overflow_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float32_to_quant8_overflow_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8_overflow_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8_overflow_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float32_to_quant8_overflow_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {-1.0f, 256.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32_to_quant8_overflow() {
+static std::vector<MixedTypedExample> examples_int32_to_quant8_overflow = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{0, {-1, 256}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_int32_to_quant8_overflow;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32_to_quant8_overflow_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_int32_to_quant8_overflow_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{0, {-1, 256}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_int32_to_quant8_overflow_dynamic_output_shape;
+};
+
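For orientation (this sketch is not part of the generated sources): every overflow example added above feeds the out-of-range values -1 and 256 into a CAST whose output operand is TENSOR_QUANT8_ASYMM, and expects {0, 255} back. The expected outputs imply that the cast saturates the raw numeric value to the uint8 range rather than applying the output's scale and zero point. A minimal standalone sketch of that conversion (helper name is illustrative):

    // Sketch only: saturating conversion consistent with the expected outputs
    // above (-1.0f -> 0, 256.0f -> 255).
    #include <algorithm>
    #include <cstdint>

    uint8_t saturatingCastToUint8(float a) {
        return static_cast<uint8_t>(std::clamp(a, 0.0f, 255.0f));
    }
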
diff --git a/nn/runtime/test/generated/examples/equal.example.cpp b/nn/runtime/test/generated/examples/equal.example.cpp
index 340e4ef..f395337 100644
--- a/nn/runtime/test/generated/examples/equal.example.cpp
+++ b/nn/runtime/test/generated/examples/equal.example.cpp
@@ -1368,3 +1368,117 @@
return examples_quantized_overflow_first_input_if_requantized_dynamic_output_shape;
};
+std::vector<MixedTypedExample>& get_examples_boolean() {
+static std::vector<MixedTypedExample> examples_boolean = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}, {1, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, false, true}}, {1, {false, false, true, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {true, false, false, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_boolean;
+};
+
+std::vector<MixedTypedExample>& get_examples_boolean_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_boolean_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}, {1, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, false, true}}, {1, {false, false, true, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {true, false, false, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_boolean_dynamic_output_shape;
+};
+
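For orientation, the boolean EQUAL example above is a plain elementwise comparison: {false, true, false, true} against {false, false, true, true} yields {true, false, false, true}. A self-contained sketch (not the NNAPI implementation; names are illustrative):

    #include <cstddef>
    #include <vector>

    // Elementwise EQUAL over two BOOL8 operands of identical shape.
    std::vector<bool> equalElementwise(const std::vector<bool>& a,
                                       const std::vector<bool>& b) {
        std::vector<bool> out(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) out[i] = (a[i] == b[i]);
        return out;
    }

The NOT_EQUAL examples added further below use the same inputs and expect the complement, {false, true, true, false}.
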
diff --git a/nn/runtime/test/generated/examples/maximum.example.cpp b/nn/runtime/test/generated/examples/maximum.example.cpp
index 1f12cdc..a77c066 100644
--- a/nn/runtime/test/generated/examples/maximum.example.cpp
+++ b/nn/runtime/test/generated/examples/maximum.example.cpp
@@ -1140,3 +1140,117 @@
return examples_broadcast_dynamic_output_shape_quant8;
};
+std::vector<MixedTypedExample>& get_examples_overflow() {
+static std::vector<MixedTypedExample> examples_overflow = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}, {1, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {60, 128}}, {1, {128, 200}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {128, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_overflow;
+};
+
+std::vector<MixedTypedExample>& get_examples_overflow_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_overflow_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}, {1, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {60, 128}}, {1, {128, 200}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {128, 255}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_overflow_dynamic_output_shape;
+};
+
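The "overflow" expectations above (and in the MINIMUM diff that follows, which reuses the same inputs) only make sense once the operands' quantization parameters are taken into account; those live in the model files, which this change does not touch. Parameters consistent with the expected values are input scale 1.0 / zeroPoint 128 and output scale 0.5 / zeroPoint 128 — treat them as inferred, not confirmed. A worked sketch under that assumption:

    // Sketch, not the runtime implementation: dequantize both inputs, take the
    // elementwise max, requantize into the (assumed) output parameters, saturate.
    #include <algorithm>
    #include <cstdint>

    uint8_t maxRequantized(uint8_t a, uint8_t b) {
        const float kInScale = 1.0f, kOutScale = 0.5f;
        const int kZeroPoint = 128;  // assumed for both inputs and the output
        float real = std::max((a - kZeroPoint) * kInScale,
                              (b - kZeroPoint) * kInScale);
        float q = real / kOutScale + kZeroPoint;
        return static_cast<uint8_t>(std::clamp(q, 0.0f, 255.0f));
    }

Under these numbers, maxRequantized(60, 128) == 128 and maxRequantized(128, 200) == 255, matching the {128, 255} output above; pushing the same data through an elementwise min gives {0, 128}, matching the MINIMUM examples below.
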
diff --git a/nn/runtime/test/generated/examples/minimum.example.cpp b/nn/runtime/test/generated/examples/minimum.example.cpp
index fd6652f..0f69d0b 100644
--- a/nn/runtime/test/generated/examples/minimum.example.cpp
+++ b/nn/runtime/test/generated/examples/minimum.example.cpp
@@ -1140,3 +1140,117 @@
return examples_broadcast_dynamic_output_shape_quant8;
};
+std::vector<MixedTypedExample>& get_examples_overflow() {
+static std::vector<MixedTypedExample> examples_overflow = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}, {1, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {60, 128}}, {1, {128, 200}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 128}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_overflow;
+};
+
+std::vector<MixedTypedExample>& get_examples_overflow_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_overflow_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}, {1, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {60, 128}}, {1, {128, 200}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {0, 128}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_overflow_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/examples/not_equal.example.cpp b/nn/runtime/test/generated/examples/not_equal.example.cpp
index feb0fb4..85e7d4a 100644
--- a/nn/runtime/test/generated/examples/not_equal.example.cpp
+++ b/nn/runtime/test/generated/examples/not_equal.example.cpp
@@ -1368,3 +1368,117 @@
return examples_quantized_overflow_first_input_if_requantized_dynamic_output_shape;
};
+std::vector<MixedTypedExample>& get_examples_boolean() {
+static std::vector<MixedTypedExample> examples_boolean = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}, {1, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, false, true}}, {1, {false, false, true, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, true, false}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_boolean;
+};
+
+std::vector<MixedTypedExample>& get_examples_boolean_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_boolean_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}, {1, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, false, true}}, {1, {false, false, true, true}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {{0, {false, true, true, false}}},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_boolean_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/examples/quantized_lstm.example.cpp b/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
index af485ea..1cd2005 100644
--- a/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
+++ b/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
@@ -228,3 +228,231 @@
return examples_dynamic_output_shape_relaxed;
};
+std::vector<MixedTypedExample>& get_examples_constant_weights() {
+static std::vector<MixedTypedExample> examples_constant_weights = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 2}}, {1, {1, 4}}, {2, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {166, 179}}, {2, {136, 150, 140, 115}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{1, {876, 1034, 955, -909}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 4}}, {1, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{1, {140, 151, 146, 112}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{0, {1485, 1177, 1373, -1023}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_constant_weights;
+};
+
+std::vector<MixedTypedExample>& get_examples_constant_weights_relaxed() {
+static std::vector<MixedTypedExample> examples_constant_weights_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 2}}, {1, {1, 4}}, {2, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {166, 179}}, {2, {136, 150, 140, 115}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{1, {876, 1034, 955, -909}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 4}}, {1, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{1, {140, 151, 146, 112}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{0, {1485, 1177, 1373, -1023}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_constant_weights_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_constant_weights_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_constant_weights_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 2}}, {1, {1, 4}}, {2, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {166, 179}}, {2, {136, 150, 140, 115}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{1, {876, 1034, 955, -909}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 4}}, {1, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{1, {140, 151, 146, 112}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{0, {1485, 1177, 1373, -1023}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_constant_weights_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_constant_weights_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_constant_weights_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 2}}, {1, {1, 4}}, {2, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{0, {166, 179}}, {2, {136, 150, 140, 115}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{1, {876, 1034, 955, -909}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {1, 4}}, {1, {1, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {{1, {140, 151, 146, 112}}},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {{0, {1485, 1177, 1373, -1023}}},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_constant_weights_dynamic_output_shape_relaxed;
+};
+
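One structural note on the constant_weights examples above: only three runtime operands appear on the input side. The weight and bias tensors of QUANTIZED_16BIT_LSTM are presumably baked into createTestModel_2 and its variants as model constants rather than passed per-request, which is what the variant name refers to:

    // Runtime inputs of the constant_weights variant (weights/biases are
    // model constants, hence absent from the example's input maps):
    //   0 -> input          TENSOR_QUANT8_ASYMM {1, 2}
    //   1 -> prevCellState  TENSOR_QUANT16_SYMM {1, 4}
    //   2 -> prevOutput     TENSOR_QUANT8_ASYMM {1, 4}
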
diff --git a/nn/runtime/test/generated/models/cast.model.cpp b/nn/runtime/test/generated/models/cast.model.cpp
index b45c5ed..1a10cd6 100644
--- a/nn/runtime/test/generated/models/cast.model.cpp
+++ b/nn/runtime/test/generated/models/cast.model.cpp
@@ -21,10 +21,10 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
- OperandType type4(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input0}, {output0});
// Phase 3, inputs and outputs
@@ -83,10 +83,10 @@
void CreateModel_dynamic_output_shape_2(Model *model) {
OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input0}, {output01});
// Phase 3, inputs and outputs
@@ -103,10 +103,10 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input0}, {output01});
// Phase 3, inputs and outputs
@@ -145,10 +145,10 @@
void CreateModel_dynamic_output_shape_3(Model *model) {
OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
- OperandType type6(Type::TENSOR_INT32, {0, 0});
+ OperandType type10(Type::TENSOR_INT32, {0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
- auto output02 = model->addOperand(&type6);
+ auto output02 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input0}, {output02});
// Phase 3, inputs and outputs
@@ -185,10 +185,10 @@
void CreateModel_dynamic_output_shape_4(Model *model) {
OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
+ OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
// Phase 1, operands
auto input0 = model->addOperand(&type0);
- auto output03 = model->addOperand(&type7);
+ auto output03 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input0}, {output03});
// Phase 3, inputs and outputs
@@ -247,10 +247,10 @@
void CreateModel_dynamic_output_shape_5(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type4(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output0 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output0});
// Phase 3, inputs and outputs
@@ -267,10 +267,10 @@
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type4(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output0 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output0});
// Phase 3, inputs and outputs
@@ -329,10 +329,10 @@
void CreateModel_dynamic_output_shape_6(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output01});
// Phase 3, inputs and outputs
@@ -349,10 +349,10 @@
void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output01});
// Phase 3, inputs and outputs
@@ -413,10 +413,10 @@
void CreateModel_dynamic_output_shape_7(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type6(Type::TENSOR_INT32, {0, 0});
+ OperandType type10(Type::TENSOR_INT32, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output02 = model->addOperand(&type6);
+ auto output02 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output02});
// Phase 3, inputs and outputs
@@ -433,10 +433,10 @@
void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type6(Type::TENSOR_INT32, {0, 0});
+ OperandType type10(Type::TENSOR_INT32, {0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output02 = model->addOperand(&type6);
+ auto output02 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output02});
// Phase 3, inputs and outputs
@@ -497,10 +497,10 @@
void CreateModel_dynamic_output_shape_8(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
+ OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output03 = model->addOperand(&type7);
+ auto output03 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output03});
// Phase 3, inputs and outputs
@@ -517,10 +517,10 @@
void CreateModel_dynamic_output_shape_relaxed_5(Model *model) {
OperandType type1(Type::TENSOR_FLOAT32, {2, 3});
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
+ OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
// Phase 1, operands
auto input01 = model->addOperand(&type1);
- auto output03 = model->addOperand(&type7);
+ auto output03 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input01}, {output03});
// Phase 3, inputs and outputs
@@ -559,10 +559,10 @@
void CreateModel_dynamic_output_shape_9(Model *model) {
OperandType type2(Type::TENSOR_INT32, {2, 3});
- OperandType type4(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
// Phase 1, operands
auto input02 = model->addOperand(&type2);
- auto output0 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input02}, {output0});
// Phase 3, inputs and outputs
@@ -621,10 +621,10 @@
void CreateModel_dynamic_output_shape_10(Model *model) {
OperandType type2(Type::TENSOR_INT32, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input02 = model->addOperand(&type2);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input02}, {output01});
// Phase 3, inputs and outputs
@@ -641,10 +641,10 @@
void CreateModel_dynamic_output_shape_relaxed_6(Model *model) {
OperandType type2(Type::TENSOR_INT32, {2, 3});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input02 = model->addOperand(&type2);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input02}, {output01});
// Phase 3, inputs and outputs
@@ -681,11 +681,11 @@
}
void CreateModel_dynamic_output_shape_11(Model *model) {
+ OperandType type10(Type::TENSOR_INT32, {0, 0});
OperandType type2(Type::TENSOR_INT32, {2, 3});
- OperandType type6(Type::TENSOR_INT32, {0, 0});
// Phase 1, operands
auto input02 = model->addOperand(&type2);
- auto output02 = model->addOperand(&type6);
+ auto output02 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input02}, {output02});
// Phase 3, inputs and outputs
@@ -721,11 +721,11 @@
}
void CreateModel_dynamic_output_shape_12(Model *model) {
+ OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
OperandType type2(Type::TENSOR_INT32, {2, 3});
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
// Phase 1, operands
auto input02 = model->addOperand(&type2);
- auto output03 = model->addOperand(&type7);
+ auto output03 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input02}, {output03});
// Phase 3, inputs and outputs
@@ -762,10 +762,10 @@
void CreateModel_dynamic_output_shape_13(Model *model) {
OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 4.0f, 100);
- OperandType type4(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
// Phase 1, operands
auto input03 = model->addOperand(&type3);
- auto output0 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input03}, {output0});
// Phase 3, inputs and outputs
@@ -824,10 +824,10 @@
void CreateModel_dynamic_output_shape_14(Model *model) {
OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 4.0f, 100);
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input03 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input03}, {output01});
// Phase 3, inputs and outputs
@@ -844,10 +844,10 @@
void CreateModel_dynamic_output_shape_relaxed_7(Model *model) {
OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 4.0f, 100);
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0});
// Phase 1, operands
auto input03 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type5);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input03}, {output01});
// Phase 3, inputs and outputs
@@ -885,11 +885,11 @@
}
void CreateModel_dynamic_output_shape_15(Model *model) {
+ OperandType type10(Type::TENSOR_INT32, {0, 0});
OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 4.0f, 100);
- OperandType type6(Type::TENSOR_INT32, {0, 0});
// Phase 1, operands
auto input03 = model->addOperand(&type3);
- auto output02 = model->addOperand(&type6);
+ auto output02 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input03}, {output02});
// Phase 3, inputs and outputs
@@ -924,11 +924,11 @@
}
void CreateModel_dynamic_output_shape_16(Model *model) {
+ OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 4.0f, 100);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 4.0f, 100);
// Phase 1, operands
auto input03 = model->addOperand(&type3);
- auto output03 = model->addOperand(&type7);
+ auto output03 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_CAST, {input03}, {output03});
// Phase 3, inputs and outputs
@@ -943,3 +943,167 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_17(Model *model) {
+ OperandType type4(Type::TENSOR_FLOAT16, {2});
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2}, 4.0f, 100);
+ // Phase 1, operands
+ auto input04 = model->addOperand(&type4);
+ auto output04 = model->addOperand(&type7);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input04}, {output04});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input04},
+ {output04});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_17(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_17(Model *model) {
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0}, 4.0f, 100);
+ OperandType type4(Type::TENSOR_FLOAT16, {2});
+ // Phase 1, operands
+ auto input04 = model->addOperand(&type4);
+ auto output04 = model->addOperand(&type12);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input04}, {output04});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input04},
+ {output04});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_17(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_18(Model *model) {
+ OperandType type5(Type::TENSOR_FLOAT32, {2});
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2}, 4.0f, 100);
+ // Phase 1, operands
+ auto input05 = model->addOperand(&type5);
+ auto output05 = model->addOperand(&type7);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input05}, {output05});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input05},
+ {output05});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_18(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_8(Model *model) {
+ OperandType type5(Type::TENSOR_FLOAT32, {2});
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2}, 4.0f, 100);
+ // Phase 1, operands
+ auto input05 = model->addOperand(&type5);
+ auto output05 = model->addOperand(&type7);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input05}, {output05});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input05},
+ {output05});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_8(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_18(Model *model) {
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0}, 4.0f, 100);
+ OperandType type5(Type::TENSOR_FLOAT32, {2});
+ // Phase 1, operands
+ auto input05 = model->addOperand(&type5);
+ auto output05 = model->addOperand(&type12);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input05}, {output05});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input05},
+ {output05});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_18(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_relaxed_8(Model *model) {
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0}, 4.0f, 100);
+ OperandType type5(Type::TENSOR_FLOAT32, {2});
+ // Phase 1, operands
+ auto input05 = model->addOperand(&type5);
+ auto output05 = model->addOperand(&type12);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input05}, {output05});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input05},
+ {output05});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed_8(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_19(Model *model) {
+ OperandType type6(Type::TENSOR_INT32, {2});
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2}, 4.0f, 100);
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type6);
+ auto output06 = model->addOperand(&type7);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input06}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_19(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_19(Model *model) {
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0}, 4.0f, 100);
+ OperandType type6(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type6);
+ auto output06 = model->addOperand(&type12);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_CAST, {input06}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_19(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
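Most of the churn in cast.model.cpp above is mechanical renumbering: the new fixed-shape overflow operands claim type4 through type7 ({2}-shaped FLOAT16, FLOAT32, INT32, and QUANT8_ASYMM respectively), so the pre-existing {0, 0}-shaped dynamic-output types shift from type4-type7 to type8-type11, and type12 is introduced for the {0}-shaped quant8 output. New behavior enters only with the CreateModel_17/18/19 family (and their relaxed_8 counterparts) at the end of the file.
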
diff --git a/nn/runtime/test/generated/models/equal.model.cpp b/nn/runtime/test/generated/models/equal.model.cpp
index e10f372..d41410b 100644
--- a/nn/runtime/test/generated/models/equal.model.cpp
+++ b/nn/runtime/test/generated/models/equal.model.cpp
@@ -23,10 +23,10 @@
void CreateModel_int32(Model *model) {
OperandType type1(Type::TENSOR_BOOL8, {3});
- OperandType type11(Type::TENSOR_INT32, {3});
+ OperandType type12(Type::TENSOR_INT32, {3});
// Phase 1, operands
- auto input0 = model->addOperand(&type11);
- auto input1 = model->addOperand(&type11);
+ auto input0 = model->addOperand(&type12);
+ auto input1 = model->addOperand(&type12);
auto output0 = model->addOperand(&type1);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
@@ -44,10 +44,10 @@
void CreateModel_float16(Model *model) {
OperandType type1(Type::TENSOR_BOOL8, {3});
- OperandType type12(Type::TENSOR_FLOAT16, {3});
+ OperandType type13(Type::TENSOR_FLOAT16, {3});
// Phase 1, operands
- auto input0 = model->addOperand(&type12);
- auto input1 = model->addOperand(&type12);
+ auto input0 = model->addOperand(&type13);
+ auto input1 = model->addOperand(&type13);
auto output0 = model->addOperand(&type1);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
@@ -88,11 +88,11 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -108,12 +108,12 @@
}
void CreateModel_dynamic_output_shape_int32(Model *model) {
- OperandType type11(Type::TENSOR_INT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type12(Type::TENSOR_INT32, {3});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
- auto input0 = model->addOperand(&type11);
- auto input1 = model->addOperand(&type11);
- auto output0 = model->addOperand(&type13);
+ auto input0 = model->addOperand(&type12);
+ auto input1 = model->addOperand(&type12);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -129,12 +129,12 @@
}
void CreateModel_dynamic_output_shape_float16(Model *model) {
- OperandType type12(Type::TENSOR_FLOAT16, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type13(Type::TENSOR_FLOAT16, {3});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
- auto input0 = model->addOperand(&type12);
- auto input1 = model->addOperand(&type12);
- auto output0 = model->addOperand(&type13);
+ auto input0 = model->addOperand(&type13);
+ auto input1 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -151,11 +151,11 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -195,12 +195,12 @@
}
void CreateModel_int32_2(Model *model) {
- OperandType type14(Type::TENSOR_INT32, {2, 1});
- OperandType type15(Type::TENSOR_INT32, {2});
+ OperandType type15(Type::TENSOR_INT32, {2, 1});
+ OperandType type16(Type::TENSOR_INT32, {2});
OperandType type4(Type::TENSOR_BOOL8, {2, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type14);
- auto input11 = model->addOperand(&type15);
+ auto input01 = model->addOperand(&type15);
+ auto input11 = model->addOperand(&type16);
auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
@@ -217,12 +217,12 @@
}
void CreateModel_float16_2(Model *model) {
- OperandType type16(Type::TENSOR_FLOAT16, {2, 1});
- OperandType type17(Type::TENSOR_FLOAT16, {2});
+ OperandType type17(Type::TENSOR_FLOAT16, {2, 1});
+ OperandType type18(Type::TENSOR_FLOAT16, {2});
OperandType type4(Type::TENSOR_BOOL8, {2, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type16);
- auto input11 = model->addOperand(&type17);
+ auto input01 = model->addOperand(&type17);
+ auto input11 = model->addOperand(&type18);
auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
@@ -263,13 +263,13 @@
}
void CreateModel_dynamic_output_shape_2(Model *model) {
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
OperandType type3(Type::TENSOR_FLOAT32, {2});
// Phase 1, operands
auto input01 = model->addOperand(&type2);
auto input11 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -285,13 +285,13 @@
}
void CreateModel_dynamic_output_shape_int32_2(Model *model) {
- OperandType type14(Type::TENSOR_INT32, {2, 1});
- OperandType type15(Type::TENSOR_INT32, {2});
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type15(Type::TENSOR_INT32, {2, 1});
+ OperandType type16(Type::TENSOR_INT32, {2});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
// Phase 1, operands
- auto input01 = model->addOperand(&type14);
- auto input11 = model->addOperand(&type15);
- auto output01 = model->addOperand(&type18);
+ auto input01 = model->addOperand(&type15);
+ auto input11 = model->addOperand(&type16);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -307,13 +307,13 @@
}
void CreateModel_dynamic_output_shape_float16_2(Model *model) {
- OperandType type16(Type::TENSOR_FLOAT16, {2, 1});
- OperandType type17(Type::TENSOR_FLOAT16, {2});
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type17(Type::TENSOR_FLOAT16, {2, 1});
+ OperandType type18(Type::TENSOR_FLOAT16, {2});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
// Phase 1, operands
- auto input01 = model->addOperand(&type16);
- auto input11 = model->addOperand(&type17);
- auto output01 = model->addOperand(&type18);
+ auto input01 = model->addOperand(&type17);
+ auto input11 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -329,13 +329,13 @@
}
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
OperandType type3(Type::TENSOR_FLOAT32, {2});
// Phase 1, operands
auto input01 = model->addOperand(&type2);
auto input11 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -375,13 +375,13 @@
}
void CreateModel_dynamic_output_shape_3(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0f, 128);
OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1}, 2.0f, 128);
// Phase 1, operands
auto input02 = model->addOperand(&type5);
auto input12 = model->addOperand(&type6);
- auto output02 = model->addOperand(&type13);
+ auto output02 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input02, input12}, {output02});
// Phase 3, inputs and outputs
@@ -419,13 +419,13 @@
}
void CreateModel_dynamic_output_shape_4(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0f, 128);
OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1}, 1.0f, 129);
// Phase 1, operands
auto input03 = model->addOperand(&type5);
auto input13 = model->addOperand(&type7);
- auto output03 = model->addOperand(&type13);
+ auto output03 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input03, input13}, {output03});
// Phase 3, inputs and outputs
@@ -463,13 +463,13 @@
}
void CreateModel_dynamic_output_shape_5(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1}, 1.64771f, 31);
OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1}, 1.49725f, 240);
// Phase 1, operands
auto input04 = model->addOperand(&type8);
auto input14 = model->addOperand(&type9);
- auto output04 = model->addOperand(&type13);
+ auto output04 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input04, input14}, {output04});
// Phase 3, inputs and outputs
@@ -507,13 +507,13 @@
}
void CreateModel_dynamic_output_shape_6(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1}, 1.64771f, 31);
OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1}, 1.49725f, 240);
// Phase 1, operands
auto input05 = model->addOperand(&type9);
auto input15 = model->addOperand(&type8);
- auto output05 = model->addOperand(&type13);
+ auto output05 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_EQUAL, {input05, input15}, {output05});
// Phase 3, inputs and outputs
@@ -528,3 +528,44 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_7(Model *model) {
+ OperandType type11(Type::TENSOR_BOOL8, {4});
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type11);
+ auto input16 = model->addOperand(&type11);
+ auto output06 = model->addOperand(&type11);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_EQUAL, {input06, input16}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06, input16},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_7(Model *model) {
+ OperandType type11(Type::TENSOR_BOOL8, {4});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type11);
+ auto input16 = model->addOperand(&type11);
+ auto output06 = model->addOperand(&type14);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_EQUAL, {input06, input16}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06, input16},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
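Note on the quant8 EQUAL tests above: the two input operands carry different quantization parameters (scale 1.0 with zeroPoint 128 against scale 2.0 with zeroPoint 128), so equality has to be decided on dequantized values rather than on the raw uint8 codes. A minimal sketch of that arithmetic, with hypothetical element values chosen only for illustration:

// Dequantize with real = (q - zeroPoint) * scale.
float lhs = (130 - 128) * 1.0f;  // scale 1.0, zeroPoint 128 -> 2.0f
float rhs = (129 - 128) * 2.0f;  // scale 2.0, zeroPoint 128 -> 2.0f
bool eq = (lhs == rhs);          // true: raw codes 130 and 129 differ, reals match
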
diff --git a/nn/runtime/test/generated/models/maximum.model.cpp b/nn/runtime/test/generated/models/maximum.model.cpp
index d046ce8..d9c2fb9 100644
--- a/nn/runtime/test/generated/models/maximum.model.cpp
+++ b/nn/runtime/test/generated/models/maximum.model.cpp
@@ -43,11 +43,11 @@
}
void CreateModel_float16(Model *model) {
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type2);
- auto input1 = model->addOperand(&type2);
- auto output0 = model->addOperand(&type2);
+ auto input0 = model->addOperand(&type4);
+ auto input1 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -63,11 +63,11 @@
}
void CreateModel_int32(Model *model) {
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type3);
- auto input1 = model->addOperand(&type3);
- auto output0 = model->addOperand(&type3);
+ auto input0 = model->addOperand(&type5);
+ auto input1 = model->addOperand(&type5);
+ auto output0 = model->addOperand(&type5);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -83,13 +83,13 @@
}
void CreateModel_quant8(Model *model) {
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
- OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
// Phase 1, operands
- auto input0 = model->addOperand(&type4);
- auto input1 = model->addOperand(&type5);
- auto output0 = model->addOperand(&type6);
+ auto input0 = model->addOperand(&type6);
+ auto input1 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -106,11 +106,11 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -127,11 +127,11 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -149,12 +149,12 @@
}
void CreateModel_dynamic_output_shape_float16(Model *model) {
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
- OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type2);
- auto input1 = model->addOperand(&type2);
- auto output0 = model->addOperand(&type8);
+ auto input0 = model->addOperand(&type4);
+ auto input1 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -170,12 +170,12 @@
}
void CreateModel_dynamic_output_shape_int32(Model *model) {
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
- OperandType type9(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type3);
- auto input1 = model->addOperand(&type3);
- auto output0 = model->addOperand(&type9);
+ auto input0 = model->addOperand(&type5);
+ auto input1 = model->addOperand(&type5);
+ auto output0 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -191,13 +191,13 @@
}
void CreateModel_dynamic_output_shape_quant8(Model *model) {
- OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
// Phase 1, operands
- auto input0 = model->addOperand(&type4);
- auto input1 = model->addOperand(&type5);
- auto output0 = model->addOperand(&type10);
+ auto input0 = model->addOperand(&type6);
+ auto input1 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type12);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -257,12 +257,12 @@
}
void CreateModel_float16_2(Model *model) {
- OperandType type11(Type::TENSOR_FLOAT16, {2});
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
+ OperandType type13(Type::TENSOR_FLOAT16, {2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type2);
- auto input11 = model->addOperand(&type11);
- auto output01 = model->addOperand(&type2);
+ auto input01 = model->addOperand(&type4);
+ auto input11 = model->addOperand(&type13);
+ auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -278,12 +278,12 @@
}
void CreateModel_int32_2(Model *model) {
- OperandType type12(Type::TENSOR_INT32, {2});
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
+ OperandType type14(Type::TENSOR_INT32, {2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type3);
- auto input11 = model->addOperand(&type12);
- auto output01 = model->addOperand(&type3);
+ auto input01 = model->addOperand(&type5);
+ auto input11 = model->addOperand(&type14);
+ auto output01 = model->addOperand(&type5);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -299,13 +299,13 @@
}
void CreateModel_quant8_2(Model *model) {
- OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
+ OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
// Phase 1, operands
- auto input01 = model->addOperand(&type4);
- auto input11 = model->addOperand(&type13);
- auto output01 = model->addOperand(&type6);
+ auto input01 = model->addOperand(&type6);
+ auto input11 = model->addOperand(&type15);
+ auto output01 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -323,11 +323,11 @@
void CreateModel_dynamic_output_shape_2(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
OperandType type1(Type::TENSOR_FLOAT32, {2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type0);
auto input11 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type7);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -345,11 +345,11 @@
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
OperandType type1(Type::TENSOR_FLOAT32, {2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type0);
auto input11 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type7);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -367,13 +367,13 @@
}
void CreateModel_dynamic_output_shape_float16_2(Model *model) {
- OperandType type11(Type::TENSOR_FLOAT16, {2});
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
- OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type13(Type::TENSOR_FLOAT16, {2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type2);
- auto input11 = model->addOperand(&type11);
- auto output01 = model->addOperand(&type8);
+ auto input01 = model->addOperand(&type4);
+ auto input11 = model->addOperand(&type13);
+ auto output01 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -389,13 +389,13 @@
}
void CreateModel_dynamic_output_shape_int32_2(Model *model) {
- OperandType type12(Type::TENSOR_INT32, {2});
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
- OperandType type9(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type14(Type::TENSOR_INT32, {2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type3);
- auto input11 = model->addOperand(&type12);
- auto output01 = model->addOperand(&type9);
+ auto input01 = model->addOperand(&type5);
+ auto input11 = model->addOperand(&type14);
+ auto output01 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -411,13 +411,13 @@
}
void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
- OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
- OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
+ OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
// Phase 1, operands
- auto input01 = model->addOperand(&type4);
- auto input11 = model->addOperand(&type13);
- auto output01 = model->addOperand(&type10);
+ auto input01 = model->addOperand(&type6);
+ auto input11 = model->addOperand(&type15);
+ auto output01 = model->addOperand(&type12);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MAXIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -432,3 +432,45 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_3(Model *model) {
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 128);
+ // Phase 1, operands
+ auto input02 = model->addOperand(&type2);
+ auto input12 = model->addOperand(&type2);
+ auto output02 = model->addOperand(&type3);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_MAXIMUM, {input02, input12}, {output02});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input02, input12},
+ {output02});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_3(Model *model) {
+ OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 128);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
+ // Phase 1, operands
+ auto input02 = model->addOperand(&type2);
+ auto input12 = model->addOperand(&type2);
+ auto output02 = model->addOperand(&type16);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_MAXIMUM, {input02, input12}, {output02});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input02, input12},
+ {output02});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
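Note on the new CreateModel_3 above: the MAXIMUM inputs use scale 1.0 with zeroPoint 128 while the output uses scale 0.5 with the same zeroPoint, so the result must be requantized and can land outside the uint8 range. A sketch of that requantization, assuming saturating behavior at the type bounds (an assumption; the element value below is hypothetical):

// Requantize from scale 1.0 / zeroPoint 128 to scale 0.5 / zeroPoint 128.
uint8_t q = 220;                 // represents (220 - 128) * 1.0 = 92.0
double real = (q - 128) * 1.0;   // dequantize
double out = real / 0.5 + 128;   // 312.0, outside [0, 255]
uint8_t result = out < 0 ? 0 : (out > 255 ? 255 : static_cast<uint8_t>(out));  // saturates to 255
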
diff --git a/nn/runtime/test/generated/models/minimum.model.cpp b/nn/runtime/test/generated/models/minimum.model.cpp
index e1cfc2f..89b1c75 100644
--- a/nn/runtime/test/generated/models/minimum.model.cpp
+++ b/nn/runtime/test/generated/models/minimum.model.cpp
@@ -43,11 +43,11 @@
}
void CreateModel_float16(Model *model) {
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type2);
- auto input1 = model->addOperand(&type2);
- auto output0 = model->addOperand(&type2);
+ auto input0 = model->addOperand(&type4);
+ auto input1 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -63,11 +63,11 @@
}
void CreateModel_int32(Model *model) {
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type3);
- auto input1 = model->addOperand(&type3);
- auto output0 = model->addOperand(&type3);
+ auto input0 = model->addOperand(&type5);
+ auto input1 = model->addOperand(&type5);
+ auto output0 = model->addOperand(&type5);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -83,13 +83,13 @@
}
void CreateModel_quant8(Model *model) {
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
- OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
// Phase 1, operands
- auto input0 = model->addOperand(&type4);
- auto input1 = model->addOperand(&type5);
- auto output0 = model->addOperand(&type6);
+ auto input0 = model->addOperand(&type6);
+ auto input1 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -106,11 +106,11 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -127,11 +127,11 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -149,12 +149,12 @@
}
void CreateModel_dynamic_output_shape_float16(Model *model) {
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
- OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type2);
- auto input1 = model->addOperand(&type2);
- auto output0 = model->addOperand(&type8);
+ auto input0 = model->addOperand(&type4);
+ auto input1 = model->addOperand(&type4);
+ auto output0 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -170,12 +170,12 @@
}
void CreateModel_dynamic_output_shape_int32(Model *model) {
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
- OperandType type9(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input0 = model->addOperand(&type3);
- auto input1 = model->addOperand(&type3);
- auto output0 = model->addOperand(&type9);
+ auto input0 = model->addOperand(&type5);
+ auto input1 = model->addOperand(&type5);
+ auto output0 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -191,13 +191,13 @@
}
void CreateModel_dynamic_output_shape_quant8(Model *model) {
- OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
// Phase 1, operands
- auto input0 = model->addOperand(&type4);
- auto input1 = model->addOperand(&type5);
- auto output0 = model->addOperand(&type10);
+ auto input0 = model->addOperand(&type6);
+ auto input1 = model->addOperand(&type7);
+ auto output0 = model->addOperand(&type12);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -257,12 +257,12 @@
}
void CreateModel_float16_2(Model *model) {
- OperandType type11(Type::TENSOR_FLOAT16, {2});
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
+ OperandType type13(Type::TENSOR_FLOAT16, {2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type2);
- auto input11 = model->addOperand(&type11);
- auto output01 = model->addOperand(&type2);
+ auto input01 = model->addOperand(&type4);
+ auto input11 = model->addOperand(&type13);
+ auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -278,12 +278,12 @@
}
void CreateModel_int32_2(Model *model) {
- OperandType type12(Type::TENSOR_INT32, {2});
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
+ OperandType type14(Type::TENSOR_INT32, {2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type3);
- auto input11 = model->addOperand(&type12);
- auto output01 = model->addOperand(&type3);
+ auto input01 = model->addOperand(&type5);
+ auto input11 = model->addOperand(&type14);
+ auto output01 = model->addOperand(&type5);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -299,13 +299,13 @@
}
void CreateModel_quant8_2(Model *model) {
- OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
- OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
+ OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
// Phase 1, operands
- auto input01 = model->addOperand(&type4);
- auto input11 = model->addOperand(&type13);
- auto output01 = model->addOperand(&type6);
+ auto input01 = model->addOperand(&type6);
+ auto input11 = model->addOperand(&type15);
+ auto output01 = model->addOperand(&type8);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -323,11 +323,11 @@
void CreateModel_dynamic_output_shape_2(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
OperandType type1(Type::TENSOR_FLOAT32, {2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type0);
auto input11 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type7);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -345,11 +345,11 @@
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
OperandType type1(Type::TENSOR_FLOAT32, {2});
- OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0});
+ OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
// Phase 1, operands
auto input01 = model->addOperand(&type0);
auto input11 = model->addOperand(&type1);
- auto output01 = model->addOperand(&type7);
+ auto output01 = model->addOperand(&type9);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -367,13 +367,13 @@
}
void CreateModel_dynamic_output_shape_float16_2(Model *model) {
- OperandType type11(Type::TENSOR_FLOAT16, {2});
- OperandType type2(Type::TENSOR_FLOAT16, {3, 1, 2});
- OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
+ OperandType type13(Type::TENSOR_FLOAT16, {2});
+ OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type2);
- auto input11 = model->addOperand(&type11);
- auto output01 = model->addOperand(&type8);
+ auto input01 = model->addOperand(&type4);
+ auto input11 = model->addOperand(&type13);
+ auto output01 = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -389,13 +389,13 @@
}
void CreateModel_dynamic_output_shape_int32_2(Model *model) {
- OperandType type12(Type::TENSOR_INT32, {2});
- OperandType type3(Type::TENSOR_INT32, {3, 1, 2});
- OperandType type9(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
+ OperandType type14(Type::TENSOR_INT32, {2});
+ OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type3);
- auto input11 = model->addOperand(&type12);
- auto output01 = model->addOperand(&type9);
+ auto input01 = model->addOperand(&type5);
+ auto input11 = model->addOperand(&type14);
+ auto output01 = model->addOperand(&type11);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -411,13 +411,13 @@
}
void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
- OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
- OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
+ OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
+ OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
// Phase 1, operands
- auto input01 = model->addOperand(&type4);
- auto input11 = model->addOperand(&type13);
- auto output01 = model->addOperand(&type10);
+ auto input01 = model->addOperand(&type6);
+ auto input11 = model->addOperand(&type15);
+ auto output01 = model->addOperand(&type12);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -432,3 +432,45 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_3(Model *model) {
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 128);
+ // Phase 1, operands
+ auto input02 = model->addOperand(&type2);
+ auto input12 = model->addOperand(&type2);
+ auto output02 = model->addOperand(&type3);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_MINIMUM, {input02, input12}, {output02});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input02, input12},
+ {output02});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_3(Model *model) {
+ OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 128);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
+ // Phase 1, operands
+ auto input02 = model->addOperand(&type2);
+ auto input12 = model->addOperand(&type2);
+ auto output02 = model->addOperand(&type16);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_MINIMUM, {input02, input12}, {output02});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input02, input12},
+ {output02});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
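Note on the dynamic_output_shape variants in this file and the previous ones: an output operand declared with a 0-valued dimension, such as {0} or {0, 0}, leaves that extent unspecified, and the runtime fills in the actual shape when the operation executes. The pattern from CreateModel_dynamic_output_shape_3 above, restated with illustrative names:

OperandType fixedShape(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);    // inputs: extents known at build time
OperandType deferredShape(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 128); // output: extent resolved at execution
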
diff --git a/nn/runtime/test/generated/models/not_equal.model.cpp b/nn/runtime/test/generated/models/not_equal.model.cpp
index cc77761..ac36e48 100644
--- a/nn/runtime/test/generated/models/not_equal.model.cpp
+++ b/nn/runtime/test/generated/models/not_equal.model.cpp
@@ -23,10 +23,10 @@
void CreateModel_int32(Model *model) {
OperandType type1(Type::TENSOR_BOOL8, {3});
- OperandType type11(Type::TENSOR_INT32, {3});
+ OperandType type12(Type::TENSOR_INT32, {3});
// Phase 1, operands
- auto input0 = model->addOperand(&type11);
- auto input1 = model->addOperand(&type11);
+ auto input0 = model->addOperand(&type12);
+ auto input1 = model->addOperand(&type12);
auto output0 = model->addOperand(&type1);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
@@ -44,10 +44,10 @@
void CreateModel_float16(Model *model) {
OperandType type1(Type::TENSOR_BOOL8, {3});
- OperandType type12(Type::TENSOR_FLOAT16, {3});
+ OperandType type13(Type::TENSOR_FLOAT16, {3});
// Phase 1, operands
- auto input0 = model->addOperand(&type12);
- auto input1 = model->addOperand(&type12);
+ auto input0 = model->addOperand(&type13);
+ auto input1 = model->addOperand(&type13);
auto output0 = model->addOperand(&type1);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
@@ -88,11 +88,11 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -108,12 +108,12 @@
}
void CreateModel_dynamic_output_shape_int32(Model *model) {
- OperandType type11(Type::TENSOR_INT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type12(Type::TENSOR_INT32, {3});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
- auto input0 = model->addOperand(&type11);
- auto input1 = model->addOperand(&type11);
- auto output0 = model->addOperand(&type13);
+ auto input0 = model->addOperand(&type12);
+ auto input1 = model->addOperand(&type12);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -129,12 +129,12 @@
}
void CreateModel_dynamic_output_shape_float16(Model *model) {
- OperandType type12(Type::TENSOR_FLOAT16, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type13(Type::TENSOR_FLOAT16, {3});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
- auto input0 = model->addOperand(&type12);
- auto input1 = model->addOperand(&type12);
- auto output0 = model->addOperand(&type13);
+ auto input0 = model->addOperand(&type13);
+ auto input1 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -151,11 +151,11 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_FLOAT32, {3});
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
// Phase 1, operands
auto input0 = model->addOperand(&type0);
auto input1 = model->addOperand(&type0);
- auto output0 = model->addOperand(&type13);
+ auto output0 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input0, input1}, {output0});
// Phase 3, inputs and outputs
@@ -195,12 +195,12 @@
}
void CreateModel_int32_2(Model *model) {
- OperandType type14(Type::TENSOR_INT32, {2, 1});
- OperandType type15(Type::TENSOR_INT32, {2});
+ OperandType type15(Type::TENSOR_INT32, {2, 1});
+ OperandType type16(Type::TENSOR_INT32, {2});
OperandType type4(Type::TENSOR_BOOL8, {2, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type14);
- auto input11 = model->addOperand(&type15);
+ auto input01 = model->addOperand(&type15);
+ auto input11 = model->addOperand(&type16);
auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
@@ -217,12 +217,12 @@
}
void CreateModel_float16_2(Model *model) {
- OperandType type16(Type::TENSOR_FLOAT16, {2, 1});
- OperandType type17(Type::TENSOR_FLOAT16, {2});
+ OperandType type17(Type::TENSOR_FLOAT16, {2, 1});
+ OperandType type18(Type::TENSOR_FLOAT16, {2});
OperandType type4(Type::TENSOR_BOOL8, {2, 2});
// Phase 1, operands
- auto input01 = model->addOperand(&type16);
- auto input11 = model->addOperand(&type17);
+ auto input01 = model->addOperand(&type17);
+ auto input11 = model->addOperand(&type18);
auto output01 = model->addOperand(&type4);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
@@ -263,13 +263,13 @@
}
void CreateModel_dynamic_output_shape_2(Model *model) {
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
OperandType type3(Type::TENSOR_FLOAT32, {2});
// Phase 1, operands
auto input01 = model->addOperand(&type2);
auto input11 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -285,13 +285,13 @@
}
void CreateModel_dynamic_output_shape_int32_2(Model *model) {
- OperandType type14(Type::TENSOR_INT32, {2, 1});
- OperandType type15(Type::TENSOR_INT32, {2});
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type15(Type::TENSOR_INT32, {2, 1});
+ OperandType type16(Type::TENSOR_INT32, {2});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
// Phase 1, operands
- auto input01 = model->addOperand(&type14);
- auto input11 = model->addOperand(&type15);
- auto output01 = model->addOperand(&type18);
+ auto input01 = model->addOperand(&type15);
+ auto input11 = model->addOperand(&type16);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -307,13 +307,13 @@
}
void CreateModel_dynamic_output_shape_float16_2(Model *model) {
- OperandType type16(Type::TENSOR_FLOAT16, {2, 1});
- OperandType type17(Type::TENSOR_FLOAT16, {2});
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type17(Type::TENSOR_FLOAT16, {2, 1});
+ OperandType type18(Type::TENSOR_FLOAT16, {2});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
// Phase 1, operands
- auto input01 = model->addOperand(&type16);
- auto input11 = model->addOperand(&type17);
- auto output01 = model->addOperand(&type18);
+ auto input01 = model->addOperand(&type17);
+ auto input11 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -329,13 +329,13 @@
}
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
- OperandType type18(Type::TENSOR_BOOL8, {0, 0});
+ OperandType type19(Type::TENSOR_BOOL8, {0, 0});
OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
OperandType type3(Type::TENSOR_FLOAT32, {2});
// Phase 1, operands
auto input01 = model->addOperand(&type2);
auto input11 = model->addOperand(&type3);
- auto output01 = model->addOperand(&type18);
+ auto output01 = model->addOperand(&type19);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input01, input11}, {output01});
// Phase 3, inputs and outputs
@@ -375,13 +375,13 @@
}
void CreateModel_dynamic_output_shape_3(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0f, 128);
OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1}, 2.0f, 128);
// Phase 1, operands
auto input02 = model->addOperand(&type5);
auto input12 = model->addOperand(&type6);
- auto output02 = model->addOperand(&type13);
+ auto output02 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input02, input12}, {output02});
// Phase 3, inputs and outputs
@@ -419,13 +419,13 @@
}
void CreateModel_dynamic_output_shape_4(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0f, 128);
OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1}, 1.0f, 129);
// Phase 1, operands
auto input03 = model->addOperand(&type5);
auto input13 = model->addOperand(&type7);
- auto output03 = model->addOperand(&type13);
+ auto output03 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input03, input13}, {output03});
// Phase 3, inputs and outputs
@@ -463,13 +463,13 @@
}
void CreateModel_dynamic_output_shape_5(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1}, 1.64771f, 31);
OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1}, 1.49725f, 240);
// Phase 1, operands
auto input04 = model->addOperand(&type8);
auto input14 = model->addOperand(&type9);
- auto output04 = model->addOperand(&type13);
+ auto output04 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input04, input14}, {output04});
// Phase 3, inputs and outputs
@@ -507,13 +507,13 @@
}
void CreateModel_dynamic_output_shape_6(Model *model) {
- OperandType type13(Type::TENSOR_BOOL8, {0});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1}, 1.64771f, 31);
OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1}, 1.49725f, 240);
// Phase 1, operands
auto input05 = model->addOperand(&type9);
auto input15 = model->addOperand(&type8);
- auto output05 = model->addOperand(&type13);
+ auto output05 = model->addOperand(&type14);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input05, input15}, {output05});
// Phase 3, inputs and outputs
@@ -528,3 +528,44 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_7(Model *model) {
+ OperandType type11(Type::TENSOR_BOOL8, {4});
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type11);
+ auto input16 = model->addOperand(&type11);
+ auto output06 = model->addOperand(&type11);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input06, input16}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06, input16},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_7(Model *model) {
+ OperandType type11(Type::TENSOR_BOOL8, {4});
+ OperandType type14(Type::TENSOR_BOOL8, {0});
+ // Phase 1, operands
+ auto input06 = model->addOperand(&type11);
+ auto input16 = model->addOperand(&type11);
+ auto output06 = model->addOperand(&type14);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_NOT_EQUAL, {input06, input16}, {output06});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input06, input16},
+ {output06});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
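Note on CreateModel_7 above: TENSOR_BOOL8 stores one boolean per byte, with 0 for false and any non-zero value for true, so NOT_EQUAL over two bool8 tensors reduces to comparing truth values element by element. A sketch with hypothetical element values:

uint8_t a = 1, b = 0;                               // 0 = false, non-zero = true
uint8_t notEqual = ((a != 0) != (b != 0)) ? 1 : 0;  // element-wise NOT_EQUAL -> 1 (true)
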
diff --git a/nn/runtime/test/generated/models/quantized_lstm.model.cpp b/nn/runtime/test/generated/models/quantized_lstm.model.cpp
index d3759bd..e1e5ddb 100644
--- a/nn/runtime/test/generated/models/quantized_lstm.model.cpp
+++ b/nn/runtime/test/generated/models/quantized_lstm.model.cpp
@@ -83,12 +83,12 @@
void CreateModel_dynamic_output_shape(Model *model) {
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
- OperandType type6(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
+ OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
// Phase 1, operands
auto input = model->addOperand(&type0);
auto inputToInputWeights = model->addOperand(&type1);
@@ -105,8 +105,8 @@
auto outputGateBias = model->addOperand(&type3);
auto prevCellState = model->addOperand(&type4);
auto prevOutput = model->addOperand(&type5);
- auto cellStateOut = model->addOperand(&type6);
- auto output = model->addOperand(&type7);
+ auto cellStateOut = model->addOperand(&type9);
+ auto output = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
// Phase 3, inputs and outputs
@@ -124,12 +124,12 @@
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
- OperandType type6(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
+ OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
// Phase 1, operands
auto input = model->addOperand(&type0);
auto inputToInputWeights = model->addOperand(&type1);
@@ -146,8 +146,8 @@
auto outputGateBias = model->addOperand(&type3);
auto prevCellState = model->addOperand(&type4);
auto prevOutput = model->addOperand(&type5);
- auto cellStateOut = model->addOperand(&type6);
- auto output = model->addOperand(&type7);
+ auto cellStateOut = model->addOperand(&type9);
+ auto output = model->addOperand(&type10);
// Phase 2, operations
model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
// Phase 3, inputs and outputs
@@ -164,3 +164,263 @@
return ignore.find(i) != ignore.end();
}
+void CreateModel_2(Model *model) {
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
+ OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
+ OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
+ // Phase 1, operands
+ auto input1 = model->addOperand(&type6);
+ auto inputToInputWeights1 = model->addOperand(&type1);
+ auto inputToForgetWeights1 = model->addOperand(&type1);
+ auto inputToCellWeights1 = model->addOperand(&type1);
+ auto inputToOutputWeights1 = model->addOperand(&type1);
+ auto recurrentToInputWeights1 = model->addOperand(&type2);
+ auto recurrentToForgetWeights1 = model->addOperand(&type2);
+ auto recurrentToCellWeights1 = model->addOperand(&type2);
+ auto recurrentToOutputWeights1 = model->addOperand(&type2);
+ auto inputGateBias1 = model->addOperand(&type3);
+ auto forgetGateBias1 = model->addOperand(&type3);
+ auto cellGateBias1 = model->addOperand(&type3);
+ auto outputGateBias1 = model->addOperand(&type3);
+ auto prevCellState1 = model->addOperand(&type7);
+ auto prevOutput1 = model->addOperand(&type8);
+ auto cellStateOut1 = model->addOperand(&type7);
+ auto output1 = model->addOperand(&type8);
+ // Phase 2, operations
+ static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
+ model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
+ model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
+ model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
+ model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
+ model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
+ model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
+ model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
+ model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
+ static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
+ model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
+ model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
+ model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
+ model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input1, prevCellState1, prevOutput1},
+ {cellStateOut1, output1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
+ OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
+ OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
+ // Phase 1, operands
+ auto input1 = model->addOperand(&type6);
+ auto inputToInputWeights1 = model->addOperand(&type1);
+ auto inputToForgetWeights1 = model->addOperand(&type1);
+ auto inputToCellWeights1 = model->addOperand(&type1);
+ auto inputToOutputWeights1 = model->addOperand(&type1);
+ auto recurrentToInputWeights1 = model->addOperand(&type2);
+ auto recurrentToForgetWeights1 = model->addOperand(&type2);
+ auto recurrentToCellWeights1 = model->addOperand(&type2);
+ auto recurrentToOutputWeights1 = model->addOperand(&type2);
+ auto inputGateBias1 = model->addOperand(&type3);
+ auto forgetGateBias1 = model->addOperand(&type3);
+ auto cellGateBias1 = model->addOperand(&type3);
+ auto outputGateBias1 = model->addOperand(&type3);
+ auto prevCellState1 = model->addOperand(&type7);
+ auto prevOutput1 = model->addOperand(&type8);
+ auto cellStateOut1 = model->addOperand(&type7);
+ auto output1 = model->addOperand(&type8);
+ // Phase 2, operations
+ static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
+ model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
+ model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
+ model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
+ model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
+ model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
+ model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
+ model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
+ model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
+ static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
+ model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
+ model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
+ model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
+ model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input1, prevCellState1, prevOutput1},
+ {cellStateOut1, output1});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_2(Model *model) {
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
+ OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
+ OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
+ OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
+ // Phase 1, operands
+ auto input1 = model->addOperand(&type6);
+ auto inputToInputWeights1 = model->addOperand(&type1);
+ auto inputToForgetWeights1 = model->addOperand(&type1);
+ auto inputToCellWeights1 = model->addOperand(&type1);
+ auto inputToOutputWeights1 = model->addOperand(&type1);
+ auto recurrentToInputWeights1 = model->addOperand(&type2);
+ auto recurrentToForgetWeights1 = model->addOperand(&type2);
+ auto recurrentToCellWeights1 = model->addOperand(&type2);
+ auto recurrentToOutputWeights1 = model->addOperand(&type2);
+ auto inputGateBias1 = model->addOperand(&type3);
+ auto forgetGateBias1 = model->addOperand(&type3);
+ auto cellGateBias1 = model->addOperand(&type3);
+ auto outputGateBias1 = model->addOperand(&type3);
+ auto prevCellState1 = model->addOperand(&type7);
+ auto prevOutput1 = model->addOperand(&type8);
+ auto cellStateOut1 = model->addOperand(&type9);
+ auto output1 = model->addOperand(&type10);
+ // Phase 2, operations
+ static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
+ model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
+ model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
+ model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
+ model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
+ model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
+ model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
+ model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
+ model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
+ static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
+ model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
+ model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
+ model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
+ model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input1, prevCellState1, prevOutput1},
+ {cellStateOut1, output1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
+ OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
+ OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
+ OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
+ OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
+ OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
+ OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
+ // Phase 1, operands
+ auto input1 = model->addOperand(&type6);
+ auto inputToInputWeights1 = model->addOperand(&type1);
+ auto inputToForgetWeights1 = model->addOperand(&type1);
+ auto inputToCellWeights1 = model->addOperand(&type1);
+ auto inputToOutputWeights1 = model->addOperand(&type1);
+ auto recurrentToInputWeights1 = model->addOperand(&type2);
+ auto recurrentToForgetWeights1 = model->addOperand(&type2);
+ auto recurrentToCellWeights1 = model->addOperand(&type2);
+ auto recurrentToOutputWeights1 = model->addOperand(&type2);
+ auto inputGateBias1 = model->addOperand(&type3);
+ auto forgetGateBias1 = model->addOperand(&type3);
+ auto cellGateBias1 = model->addOperand(&type3);
+ auto outputGateBias1 = model->addOperand(&type3);
+ auto prevCellState1 = model->addOperand(&type7);
+ auto prevOutput1 = model->addOperand(&type8);
+ auto cellStateOut1 = model->addOperand(&type9);
+ auto output1 = model->addOperand(&type10);
+ // Phase 2, operations
+ static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
+ model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
+ model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
+ model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
+ model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
+ static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
+ model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
+ model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
+ model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
+ static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
+ model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
+ static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
+ model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
+ model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
+ model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
+ static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
+ model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input1, prevCellState1, prevOutput1},
+ {cellStateOut1, output1});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
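One property of the operand types above worth calling out: the int32 gate-bias scale is exactly the product of the input and weight scales (so the bias adds directly into the input-times-weight accumulator), and the quant16 cell-state scale is 2^-11. A minimal standalone sketch (not part of the generated sources) that checks those relationships from the values declared above:

// Standalone sketch: verifies the scale relationships used by the quantized
// LSTM operands above. All constants are copied from the OperandType
// declarations in this file.
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
    const double inputScale = 0.0078125;        // type6: quant8 input (1/128)
    const double weightScale = 0.00408021;      // type1/type2: quant8 weights
    const double biasScale = 3.1876640625e-05;  // type3: int32 gate biases
    // Bias scale == input scale * weight scale, so biases can be added to
    // the raw (input x weight) accumulator without rescaling.
    assert(std::fabs(inputScale * weightScale - biasScale) < 1e-12);
    const double cellScale = 0.00048828125;     // type7: quant16 cell state
    assert(cellScale == std::ldexp(1.0, -11));  // exactly 2^-11
    std::puts("scales consistent");
}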
diff --git a/nn/runtime/test/generated/tests/cast.mod.py.cpp b/nn/runtime/test/generated/tests/cast.mod.py.cpp
index e266f8b..ca7b97f 100644
--- a/nn/runtime/test/generated/tests/cast.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/cast.mod.py.cpp
@@ -285,3 +285,51 @@
cast::get_examples_quant8_to_quant8_dynamic_output_shape());
}
+TEST_F(GeneratedTests, cast_float16_to_quant8_overflow) {
+ execute(cast::CreateModel_17,
+ cast::is_ignored_17,
+ cast::get_examples_float16_to_quant8_overflow());
+}
+
+TEST_F(DynamicOutputShapeTest, cast_float16_to_quant8_overflow_dynamic_output_shape) {
+ execute(cast::CreateModel_dynamic_output_shape_17,
+ cast::is_ignored_dynamic_output_shape_17,
+ cast::get_examples_float16_to_quant8_overflow_dynamic_output_shape());
+}
+
+TEST_F(GeneratedTests, cast_float32_to_quant8_overflow) {
+ execute(cast::CreateModel_18,
+ cast::is_ignored_18,
+ cast::get_examples_float32_to_quant8_overflow());
+}
+
+TEST_F(GeneratedTests, cast_float32_to_quant8_overflow_relaxed) {
+ execute(cast::CreateModel_relaxed_8,
+ cast::is_ignored_relaxed_8,
+ cast::get_examples_float32_to_quant8_overflow_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, cast_float32_to_quant8_overflow_dynamic_output_shape) {
+ execute(cast::CreateModel_dynamic_output_shape_18,
+ cast::is_ignored_dynamic_output_shape_18,
+ cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, cast_float32_to_quant8_overflow_dynamic_output_shape_relaxed) {
+ execute(cast::CreateModel_dynamic_output_shape_relaxed_8,
+ cast::is_ignored_dynamic_output_shape_relaxed_8,
+ cast::get_examples_float32_to_quant8_overflow_dynamic_output_shape_relaxed());
+}
+
+TEST_F(GeneratedTests, cast_int32_to_quant8_overflow) {
+ execute(cast::CreateModel_19,
+ cast::is_ignored_19,
+ cast::get_examples_int32_to_quant8_overflow());
+}
+
+TEST_F(DynamicOutputShapeTest, cast_int32_to_quant8_overflow_dynamic_output_shape) {
+ execute(cast::CreateModel_dynamic_output_shape_19,
+ cast::is_ignored_dynamic_output_shape_19,
+ cast::get_examples_int32_to_quant8_overflow_dynamic_output_shape());
+}
+
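The *_to_quant8_overflow tests added here feed source values whose numeric value falls outside the uint8 range; judging from the test names, the behavior they pin down is saturation rather than wrap-around. A small illustrative sketch of that behavior (an assumption drawn from the tests, not a copy of any implementation):

// Illustrative only: a saturating cast to uint8, the behavior the
// *_to_quant8_overflow tests above appear to check.
#include <cstdint>
#include <cstdio>

template <typename From>
uint8_t castSaturating(From v) {
    if (v < 0) return 0;      // below range: clamp to 0 instead of wrapping
    if (v > 255) return 255;  // above range: clamp to 255
    return static_cast<uint8_t>(v);
}

int main() {
    std::printf("%d\n", castSaturating(-1.0f));  // 0, not 255
    std::printf("%d\n", castSaturating(300));    // 255, not 44
    std::printf("%d\n", castSaturating(42.0f));  // 42: in-range passes through
}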
diff --git a/nn/runtime/test/generated/tests/equal.mod.py.cpp b/nn/runtime/test/generated/tests/equal.mod.py.cpp
index 9d06cc6..47e1bad 100644
--- a/nn/runtime/test/generated/tests/equal.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/equal.mod.py.cpp
@@ -153,3 +153,15 @@
equal::get_examples_quantized_overflow_first_input_if_requantized_dynamic_output_shape());
}
+TEST_F(GeneratedTests, equal_boolean) {
+ execute(equal::CreateModel_7,
+ equal::is_ignored_7,
+ equal::get_examples_boolean());
+}
+
+TEST_F(DynamicOutputShapeTest, equal_boolean_dynamic_output_shape) {
+ execute(equal::CreateModel_dynamic_output_shape_7,
+ equal::is_ignored_dynamic_output_shape_7,
+ equal::get_examples_boolean_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/tests/maximum.mod.py.cpp b/nn/runtime/test/generated/tests/maximum.mod.py.cpp
index 72a08a0..aa713d6 100644
--- a/nn/runtime/test/generated/tests/maximum.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/maximum.mod.py.cpp
@@ -129,3 +129,15 @@
maximum::get_examples_broadcast_dynamic_output_shape_quant8());
}
+TEST_F(GeneratedTests, maximum_overflow) {
+ execute(maximum::CreateModel_3,
+ maximum::is_ignored_3,
+ maximum::get_examples_overflow());
+}
+
+TEST_F(DynamicOutputShapeTest, maximum_overflow_dynamic_output_shape) {
+ execute(maximum::CreateModel_dynamic_output_shape_3,
+ maximum::is_ignored_dynamic_output_shape_3,
+ maximum::get_examples_overflow_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/tests/minimum.mod.py.cpp b/nn/runtime/test/generated/tests/minimum.mod.py.cpp
index 97728f6..62e1621 100644
--- a/nn/runtime/test/generated/tests/minimum.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/minimum.mod.py.cpp
@@ -129,3 +129,15 @@
minimum::get_examples_broadcast_dynamic_output_shape_quant8());
}
+TEST_F(GeneratedTests, minimum_overflow) {
+ execute(minimum::CreateModel_3,
+ minimum::is_ignored_3,
+ minimum::get_examples_overflow());
+}
+
+TEST_F(DynamicOutputShapeTest, minimum_overflow_dynamic_output_shape) {
+ execute(minimum::CreateModel_dynamic_output_shape_3,
+ minimum::is_ignored_dynamic_output_shape_3,
+ minimum::get_examples_overflow_dynamic_output_shape());
+}
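For the MAXIMUM and MINIMUM overflow tests above, the VTS models later in this patch give the inputs scale 1.0 / zero point 128 and the output scale 0.5 / zero point 128, so re-encoding an input byte q at the output's finer scale yields 2q - 128, which leaves the uint8 range for q > 191 or q < 64. A worked sketch of the expected saturation (assuming standard affine requantization):

// Worked example for the maximum/minimum overflow tests, using the operand
// parameters from the VTS models below (in: scale 1.0, zp 128; out: scale
// 0.5, zp 128). The finer output scale can overflow uint8, so expected
// outputs saturate.
#include <algorithm>
#include <cstdint>
#include <cstdio>

uint8_t reencode(uint8_t q) {
    // real value = (q - 128) * 1.0; output code = real / 0.5 + 128 = 2q - 128
    int32_t out = 2 * static_cast<int32_t>(q) - 128;
    return static_cast<uint8_t>(std::min(255, std::max(0, out)));
}

int main() {
    std::printf("%d\n", reencode(128));  // 128: real zero stays at zero point
    std::printf("%d\n", reencode(200));  // 272 -> saturates to 255
    std::printf("%d\n", reencode(30));   // -68 -> saturates to 0
}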
+
diff --git a/nn/runtime/test/generated/tests/not_equal.mod.py.cpp b/nn/runtime/test/generated/tests/not_equal.mod.py.cpp
index 218d455..4ad7a29 100644
--- a/nn/runtime/test/generated/tests/not_equal.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/not_equal.mod.py.cpp
@@ -153,3 +153,15 @@
not_equal::get_examples_quantized_overflow_first_input_if_requantized_dynamic_output_shape());
}
+TEST_F(GeneratedTests, not_equal_boolean) {
+ execute(not_equal::CreateModel_7,
+ not_equal::is_ignored_7,
+ not_equal::get_examples_boolean());
+}
+
+TEST_F(DynamicOutputShapeTest, not_equal_boolean_dynamic_output_shape) {
+ execute(not_equal::CreateModel_dynamic_output_shape_7,
+ not_equal::is_ignored_dynamic_output_shape_7,
+ not_equal::get_examples_boolean_dynamic_output_shape());
+}
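The new equal_boolean and not_equal_boolean tests cover the TENSOR_BOOL8 input case. A self-contained sketch of the elementwise comparison they exercise (assuming the usual one-byte encoding, 0 = false, 1 = true):

// Minimal sketch of what the boolean EQUAL/NOT_EQUAL tests compute:
// elementwise comparison of two TENSOR_BOOL8 inputs into a TENSOR_BOOL8
// output of the same shape ({4}, matching the VTS models below).
#include <cstdint>
#include <cstdio>

void equalBool8(const uint8_t* a, const uint8_t* b, uint8_t* out, int n,
                bool negate) {
    for (int i = 0; i < n; ++i) {
        bool eq = (a[i] != 0) == (b[i] != 0);
        out[i] = static_cast<uint8_t>(negate ? !eq : eq);
    }
}

int main() {
    const uint8_t a[4] = {0, 1, 0, 1};
    const uint8_t b[4] = {0, 0, 1, 1};
    uint8_t eq[4], ne[4];
    equalBool8(a, b, eq, 4, /*negate=*/false);  // EQUAL:     1 0 0 1
    equalBool8(a, b, ne, 4, /*negate=*/true);   // NOT_EQUAL: 0 1 1 0
    for (int i = 0; i < 4; ++i) std::printf("%d %d\n", eq[i], ne[i]);
}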
+
diff --git a/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp b/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
index 1c5f821..8f40d0d 100644
--- a/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
@@ -33,3 +33,27 @@
quantized_lstm::get_examples_dynamic_output_shape_relaxed());
}
+TEST_F(GeneratedTests, quantized_lstm_constant_weights) {
+ execute(quantized_lstm::CreateModel_2,
+ quantized_lstm::is_ignored_2,
+ quantized_lstm::get_examples_constant_weights());
+}
+
+TEST_F(GeneratedTests, quantized_lstm_constant_weights_relaxed) {
+ execute(quantized_lstm::CreateModel_relaxed_2,
+ quantized_lstm::is_ignored_relaxed_2,
+ quantized_lstm::get_examples_constant_weights_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, quantized_lstm_constant_weights_dynamic_output_shape) {
+ execute(quantized_lstm::CreateModel_dynamic_output_shape_2,
+ quantized_lstm::is_ignored_dynamic_output_shape_2,
+ quantized_lstm::get_examples_constant_weights_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, quantized_lstm_constant_weights_dynamic_output_shape_relaxed) {
+ execute(quantized_lstm::CreateModel_dynamic_output_shape_relaxed_2,
+ quantized_lstm::is_ignored_dynamic_output_shape_relaxed_2,
+ quantized_lstm::get_examples_constant_weights_dynamic_output_shape_relaxed());
+}
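What distinguishes these constant_weights variants from the earlier quantized_lstm tests is that the eight weight tensors and four gate biases are compile-time constants, leaving only the input, previous cell state, and previous output as runtime inputs. A trivial cross-check of that split (operand indexes taken from the VTS model later in this patch):

// Cross-check for the constant-weights variant: of the 15
// QUANTIZED_16BIT_LSTM inputs, only three stay runtime inputs; the other 12
// (8 weight tensors + 4 biases) are baked into the model as constants.
#include <cstdio>
#include <set>

int main() {
    // From the VTS model's inputIndexes: input, prevCellState, prevOutput.
    std::set<int> runtimeInputs = {0, 13, 14};
    int constants = 0;
    for (int op = 0; op < 15; ++op)
        if (runtimeInputs.count(op) == 0) ++constants;
    std::printf("%d constant operands\n", constants);  // prints 12
}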
+
diff --git a/nn/runtime/test/generated/vts_models/cast.model.cpp b/nn/runtime/test/generated/vts_models/cast.model.cpp
index 552cb1c..c8cf207 100644
--- a/nn/runtime/test/generated/vts_models/cast.model.cpp
+++ b/nn/runtime/test/generated/vts_models/cast.model.cpp
@@ -2360,3 +2360,413 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_17() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_17(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_17() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_17(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_18() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_18(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_8() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored_relaxed_8(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_18() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_18(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_relaxed_8() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed_8(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_19() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_19(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_19() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 4.0f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::CAST,
+ .inputs = {0},
+ .outputs = {1},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {1};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_19(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
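Across these VTS models, the dynamic_output_shape variants differ from their static counterparts only in the output operand's dimensions, where an extent of 0 appears to mark a size that is resolved at execution time. A small sketch of that convention:

// Sketch of the convention used by the *_dynamic_output_shape_* models in
// these files: a dimension of 0 means "size determined at execution time",
// so outputs like {0} or {0, 0} are filled in by the driver.
#include <cstdint>
#include <cstdio>
#include <vector>

bool shapeFullySpecified(const std::vector<uint32_t>& dims) {
    for (uint32_t d : dims)
        if (d == 0) return false;  // 0 = unknown extent
    return true;
}

int main() {
    std::printf("%d\n", shapeFullySpecified({2}));     // 1: static model
    std::printf("%d\n", shapeFullySpecified({0}));     // 0: dynamic variant
    std::printf("%d\n", shapeFullySpecified({0, 0}));  // 0: dynamic LSTM outs
}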
diff --git a/nn/runtime/test/generated/vts_models/equal.model.cpp b/nn/runtime/test/generated/vts_models/equal.model.cpp
index 84789c5..d9a63ba 100644
--- a/nn/runtime/test/generated/vts_models/equal.model.cpp
+++ b/nn/runtime/test/generated/vts_models/equal.model.cpp
@@ -1444,3 +1444,123 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_7() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::EQUAL,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_7() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::EQUAL,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/maximum.model.cpp b/nn/runtime/test/generated/vts_models/maximum.model.cpp
index 370b5df..41e8c81 100644
--- a/nn/runtime/test/generated/vts_models/maximum.model.cpp
+++ b/nn/runtime/test/generated/vts_models/maximum.model.cpp
@@ -1204,3 +1204,123 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_3() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 0.5f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::MAXIMUM,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_3() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 0.5f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::MAXIMUM,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/minimum.model.cpp b/nn/runtime/test/generated/vts_models/minimum.model.cpp
index f5a8a64..60c3020 100644
--- a/nn/runtime/test/generated/vts_models/minimum.model.cpp
+++ b/nn/runtime/test/generated/vts_models/minimum.model.cpp
@@ -1204,3 +1204,123 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_3() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 0,
+ .scale = 0.5f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::MINIMUM,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_3() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 1.0f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 0.5f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::MINIMUM,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_3(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/not_equal.model.cpp b/nn/runtime/test/generated/vts_models/not_equal.model.cpp
index ff3f077..1a0c361 100644
--- a/nn/runtime/test/generated/vts_models/not_equal.model.cpp
+++ b/nn/runtime/test/generated/vts_models/not_equal.model.cpp
@@ -1444,3 +1444,123 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_7() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::NOT_EQUAL,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_7() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_BOOL8,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::NOT_EQUAL,
+ .inputs = {0, 1},
+ .outputs = {2},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1};
+ const std::vector<uint32_t> outputIndexes = {2};
+ std::vector<uint8_t> operandValues = {};
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_7(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp b/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
index 089a28a..33923bf 100644
--- a/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
+++ b/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
@@ -746,3 +746,757 @@
return ignore.find(i) != ignore.end();
}
+// Create the model
+Model createTestModel_2() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 48, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 64, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 80, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 96, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 112, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 128, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 144, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::QUANTIZED_16BIT_LSTM,
+ .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+ .outputs = {15, 16},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 13, 14};
+ const std::vector<uint32_t> outputIndexes = {15, 16};
+ std::vector<uint8_t> operandValues = {
+ 146, 250, 235, 171, 10, 218, 171, 108, 24, 50, 132, 179, 158, 110, 3, 169, 133, 34, 29, 49, 206, 109, 54, 183, 195, 187, 11, 99, 109, 10, 218, 48, 254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26, 137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253, 172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216, 106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98, 60, 225, 255, 255, 176, 52, 0, 0, 42, 253, 255, 255, 71, 128, 0, 0, 246, 35, 0, 0, 220, 72, 255, 255, 83, 210, 255, 255, 188, 104, 255, 255, 57, 154, 0, 0, 240, 189, 0, 0, 80, 191, 0, 0, 85, 172, 255, 255, 137, 25, 255, 255, 102, 189, 255, 255, 132, 92, 255, 255, 166, 97, 255, 255
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
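The operandValues blob above is the weight and bias constants laid out back-to-back: 96 bytes of uint8 weights followed by the int32 gate biases in little-endian order, at the offsets recorded in each operand's location (96, 112, 128, 144). A quick sketch verifying the first bias element (-7876) against the byte run above, assuming a little-endian host:

// Sketch of how the gate-bias constants land in operandValues: each int32
// is stored little-endian at its operand's recorded offset. inputGateBias
// starts at offset 96, right after the 96 weight bytes.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    int32_t bias = -7876;  // first element of the input gate bias
    uint8_t bytes[4];
    std::memcpy(bytes, &bias, sizeof(bias));  // assumes little-endian host
    // Matches the "60, 225, 255, 255" run following the weight bytes above.
    assert(bytes[0] == 60 && bytes[1] == 225 && bytes[2] == 255 &&
           bytes[3] == 255);
    std::puts("bias bytes match operandValues");
}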
+// Create the model
+Model createTestModel_relaxed_2() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 48, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 64, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 80, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 96, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 112, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 128, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 144, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::QUANTIZED_16BIT_LSTM,
+ .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+ .outputs = {15, 16},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 13, 14};
+ const std::vector<uint32_t> outputIndexes = {15, 16};
+ std::vector<uint8_t> operandValues = {
+ 146, 250, 235, 171, 10, 218, 171, 108, 24, 50, 132, 179, 158, 110, 3, 169, 133, 34, 29, 49, 206, 109, 54, 183, 195, 187, 11, 99, 109, 10, 218, 48, 254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26, 137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253, 172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216, 106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98, 60, 225, 255, 255, 176, 52, 0, 0, 42, 253, 255, 255, 71, 128, 0, 0, 246, 35, 0, 0, 220, 72, 255, 255, 83, 210, 255, 255, 188, 104, 255, 255, 57, 154, 0, 0, 240, 189, 0, 0, 80, 191, 0, 0, 85, 172, 255, 255, 137, 25, 255, 255, 102, 189, 255, 255, 132, 92, 255, 255, 166, 97, 255, 255
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_2() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 48, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 64, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 80, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 96, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 112, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 128, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 144, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::QUANTIZED_16BIT_LSTM,
+ .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+ .outputs = {15, 16},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 13, 14};
+ const std::vector<uint32_t> outputIndexes = {15, 16};
+ std::vector<uint8_t> operandValues = {
+ 146, 250, 235, 171, 10, 218, 171, 108, 24, 50, 132, 179, 158, 110, 3, 169, 133, 34, 29, 49, 206, 109, 54, 183, 195, 187, 11, 99, 109, 10, 218, 48, 254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26, 137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253, 172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216, 106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98, 60, 225, 255, 255, 176, 52, 0, 0, 42, 253, 255, 255, 71, 128, 0, 0, 246, 35, 0, 0, 220, 72, 255, 255, 83, 210, 255, 255, 188, 104, 255, 255, 57, 154, 0, 0, 240, 189, 0, 0, 80, 191, 0, 0, 85, 172, 255, 255, 137, 25, 255, 255, 102, 189, 255, 255, 132, 92, 255, 255, 166, 97, 255, 255
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_relaxed_2() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 48, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 64, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {4, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00408021f,
+ .zeroPoint = 100,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 80, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 96, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 112, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 128, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 3.1876640625e-05f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 144, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {1, 4},
+ .numberOfConsumers = 1,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT16_SYMM,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.00048828125f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_QUANT8_ASYMM,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0078125f,
+ .zeroPoint = 128,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::QUANTIZED_16BIT_LSTM,
+ .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+ .outputs = {15, 16},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 13, 14};
+ const std::vector<uint32_t> outputIndexes = {15, 16};
+ std::vector<uint8_t> operandValues = {
+ 146, 250, 235, 171, 10, 218, 171, 108, 24, 50, 132, 179, 158, 110, 3, 169, 133, 34, 29, 49, 206, 109, 54, 183, 195, 187, 11, 99, 109, 10, 218, 48, 254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26, 137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253, 172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216, 106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98, 60, 225, 255, 255, 176, 52, 0, 0, 42, 253, 255, 255, 71, 128, 0, 0, 246, 35, 0, 0, 220, 72, 255, 255, 83, 210, 255, 255, 188, 104, 255, 255, 57, 154, 0, 0, 240, 189, 0, 0, 80, 191, 0, 0, 85, 172, 255, 255, 137, 25, 255, 255, 102, 189, 255, 255, 132, 92, 255, 255, 166, 97, 255, 255
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
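+
+// Note (illustrative): this _relaxed variant differs from
+// createTestModel_dynamic_output_shape_2 above only in setting
+// .relaxComputationFloat32toFloat16 = true on the returned Model.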
+
diff --git a/nn/runtime/test/specs/V1_2/cast.mod.py b/nn/runtime/test/specs/V1_2/cast.mod.py
index 8d5d34b..f1d93ce 100644
--- a/nn/runtime/test/specs/V1_2/cast.mod.py
+++ b/nn/runtime/test/specs/V1_2/cast.mod.py
@@ -60,3 +60,40 @@
if operand1.supports_relaxation or operand2.supports_relaxation:
example.AddRelaxed()
+
+
+# Test overflow and underflow.
+operands = [
+ Operand(
+ name="float16",
+ as_input=Input("input0", "TENSOR_FLOAT16", "{2}"),
+ as_output=None,
+ data=[-1, 256],
+ supports_relaxation=False),
+ Operand(
+ name="float32",
+ as_input=Input("input0", "TENSOR_FLOAT32", "{2}"),
+ as_output=None,
+ data=[-1, 256],
+ supports_relaxation=True),
+ Operand(
+ name="int32",
+ as_input=Input("input0", "TENSOR_INT32", "{2}"),
+ as_output=None,
+ data=[-1, 256],
+ supports_relaxation=False),
+]
+
+for operand1 in operands:
+ input0 = operand1.as_input
+ output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 4.0, 100")
+
+ model = Model().Operation("CAST", input0).To(output0)
+
+ example = Example({
+ input0: operand1.data,
+ output0: [0, 255],
+ }, model=model, name='{}_to_quant8_overflow'.format(operand1.name))
+
+ if operand1.supports_relaxation:
+ example.AddRelaxed()
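+
+# Illustrative note: CAST ignores the output's quantization parameters and,
+# per the clamping added to copyCast() in Cast.cpp, saturates out-of-range
+# values for uint8 destinations. Hence -1 is clamped to 0 and 256 to 255,
+# regardless of the output's scale of 4.0 and zeroPoint of 100.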
diff --git a/nn/runtime/test/specs/V1_2/equal.mod.py b/nn/runtime/test/specs/V1_2/equal.mod.py
index 3432873..d7c40fe 100644
--- a/nn/runtime/test/specs/V1_2/equal.mod.py
+++ b/nn/runtime/test/specs/V1_2/equal.mod.py
@@ -86,3 +86,14 @@
output_data=[False],
do_variations=False,
)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[True, False, False, True],
+ do_variations=False,
+)
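+
+# Note: for TENSOR_BOOL8 operands, EQUAL is element-wise XNOR: the output is
+# true exactly where the two inputs agree, as in the expected data above.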
diff --git a/nn/runtime/test/specs/V1_2/maximum.mod.py b/nn/runtime/test/specs/V1_2/maximum.mod.py
index 73fabf1..0d37a20 100644
--- a/nn/runtime/test/specs/V1_2/maximum.mod.py
+++ b/nn/runtime/test/specs/V1_2/maximum.mod.py
@@ -49,3 +49,16 @@
input1_data=[0.5, 2.0],
output_data=[1.0, 2.0, 0.5, 2.0, 0.5, 11.0],
)
+
+
+# Test overflow and underflow.
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 0.5f, 128")
+model = Model().Operation("MAXIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [60, 128],
+ input1: [128, 200],
+ output0: [128, 255],
+}, model=model, name="overflow")
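+
+# Illustrative arithmetic for the expected values: with inputs at scale 1.0,
+# zeroPoint 128 and the output at scale 0.5, zeroPoint 128, requantization
+# maps q to (q - 128) * 1.0 / 0.5 + 128, saturating to [0, 255]:
+#   max(60, 128)  = 128 -> (128 - 128) * 2 + 128 = 128
+#   max(128, 200) = 200 -> (200 - 128) * 2 + 128 = 272 -> clamped to 255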
diff --git a/nn/runtime/test/specs/V1_2/minimum.mod.py b/nn/runtime/test/specs/V1_2/minimum.mod.py
index f96d91f..76b0586 100644
--- a/nn/runtime/test/specs/V1_2/minimum.mod.py
+++ b/nn/runtime/test/specs/V1_2/minimum.mod.py
@@ -49,3 +49,16 @@
input1_data=[0.5, 2.0],
output_data=[0.5, 0.0, -1.0, -2.0, -1.44, 2.0],
)
+
+
+# Test overflow and underflow.
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 0.5f, 128")
+model = Model().Operation("MINIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [60, 128],
+ input1: [128, 200],
+ output0: [0, 128],
+}, model=model, name="overflow")
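+
+# Illustrative arithmetic for the expected values (same quantization as the
+# MAXIMUM overflow test above):
+#   min(60, 128)  = 60  -> (60 - 128) * 2 + 128 = -8 -> clamped to 0
+#   min(128, 200) = 128 -> (128 - 128) * 2 + 128 = 128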
diff --git a/nn/runtime/test/specs/V1_2/not_equal.mod.py b/nn/runtime/test/specs/V1_2/not_equal.mod.py
index 38e8e46..2c36b5a 100644
--- a/nn/runtime/test/specs/V1_2/not_equal.mod.py
+++ b/nn/runtime/test/specs/V1_2/not_equal.mod.py
@@ -86,3 +86,14 @@
output_data=[True],
do_variations=False,
)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[False, True, True, False],
+ do_variations=False,
+)
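+
+# Note: for TENSOR_BOOL8 operands, NOT_EQUAL is element-wise XOR: the output
+# is true exactly where the two inputs differ, as in the expected data above.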
diff --git a/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py b/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
index 5f4293c..5fd4c7a 100644
--- a/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
+++ b/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
@@ -29,8 +29,8 @@
input_to_input_weights = Input("inputToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
input_to_forget_weights = Input("inputToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
-input_to_cell_weights = Input("inputToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
-input_to_output_weights = Input("inputToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+input_to_cell_weights = Input("inputToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+input_to_output_weights = Input("inputToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
recurrent_to_input_weights = Input("recurrentToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
recurrent_to_forget_weights = Input("recurrentToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
@@ -90,3 +90,110 @@
output: [140, 151, 146, 112, 136, 156, 142, 112]
}
Example((input_dict, output_dict), model=model).AddVariations("relaxed")
+
+
+# TEST 2: same as the first one, but only the first batch is tested and the
+# weights are compile-time constants.
+model = Model()
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = n_cell
+
+input_ = Input("input",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
+
+weights_scale = 0.00408021
+weights_zero_point = 100
+
+input_to_input_weights = Parameter(
+ "inputToInputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [146, 250, 235, 171, 10, 218, 171, 108])
+input_to_forget_weights = Parameter(
+ "inputToForgetWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [24, 50, 132, 179, 158, 110, 3, 169])
+input_to_cell_weights = Parameter(
+ "inputToCellWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [133, 34, 29, 49, 206, 109, 54, 183])
+input_to_output_weights = Parameter(
+ "inputToOutputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [195, 187, 11, 99, 109, 10, 218, 48])
+
+recurrent_to_input_weights = Parameter(
+ "recurrentToInputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
+recurrent_to_forget_weights = Parameter(
+ "recurrentToForgetWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
+recurrent_to_cell_weights = Parameter(
+ "recurrentToCellWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
+recurrent_to_output_weights = Parameter(
+ "recurrentToOutputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])
+
+input_gate_bias = Parameter("inputGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [-7876, 13488, -726, 32839])
+forget_gate_bias = Parameter("forgetGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [9206, -46884, -11693, -38724])
+cell_gate_bias = Parameter("cellGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [39481, 48624, 48976, -21419])
+output_gate_bias = Parameter("outputGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [-58999, -17050, -41852, -40538])
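+
+# Note: the INT32 gate biases carry scale weights_scale / 128, i.e.
+# input_scale * weights_scale = (1 / 128) * 0.00408021 = 3.1876640625e-05,
+# which is the scale on the generated TENSOR_INT32 operands above.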
+
+prev_cell_state = Input("prevCellState",
+ ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+prev_output = Input("prevOutput",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+cell_state_out = Output("cellStateOut",
+ ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+output = Output("output",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+model = model.Operation("QUANTIZED_16BIT_LSTM", input_, input_to_input_weights,
+ input_to_forget_weights, input_to_cell_weights,
+ input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, input_gate_bias,
+ forget_gate_bias, cell_gate_bias, output_gate_bias,
+ prev_cell_state,
+ prev_output).To([cell_state_out, output])
+
+input_dict = {
+ input_: [166, 179],
+ prev_cell_state: [876, 1034, 955, -909],
+ prev_output: [136, 150, 140, 115],
+}
+
+output_dict = {
+ cell_state_out: [1485, 1177, 1373, -1023],
+ output: [140, 151, 146, 112]
+}
+Example((input_dict, output_dict), model=model,
+ name="constant_weights").AddVariations("relaxed")