Merge "Create tests for SVDF with non-null bias"
diff --git a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
index 7104f6e..cd96b55 100644
--- a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -147,6 +147,7 @@
#include "../generated/tests/space_to_depth_quant8_2.mod.py.cpp"
#include "../generated/tests/svdf.mod.py.cpp"
#include "../generated/tests/svdf2.mod.py.cpp"
+#include "../generated/tests/svdf_bias_present.mod.py.cpp"
#include "../generated/tests/svdf_state.mod.py.cpp"
#include "../generated/tests/tanh.mod.py.cpp"
#include "../generated/tests/add_relaxed.mod.py.cpp"
@@ -317,6 +318,7 @@
#include "../generated/tests/sub_broadcast_float_relaxed.mod.py.cpp"
#include "../generated/tests/sub_relaxed.mod.py.cpp"
#include "../generated/tests/svdf2_relaxed.mod.py.cpp"
+#include "../generated/tests/svdf_bias_present_relaxed.mod.py.cpp"
#include "../generated/tests/svdf_relaxed.mod.py.cpp"
#include "../generated/tests/svdf_state_relaxed.mod.py.cpp"
#include "../generated/tests/tanh_relaxed.mod.py.cpp"
@@ -463,6 +465,7 @@
#include "../generated/tests/sub_quantized_different_scales.mod.py.cpp"
#include "../generated/tests/sub_v1_2.mod.py.cpp"
#include "../generated/tests/sub_v1_2_broadcast.mod.py.cpp"
+#include "../generated/tests/svdf_bias_present_float16.mod.py.cpp"
#include "../generated/tests/svdf_float16.mod.py.cpp"
#include "../generated/tests/svdf_state_float16.mod.py.cpp"
#include "../generated/tests/tanh_v1_2.mod.py.cpp"
diff --git a/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
index b32867e..0b54b3e 100644
--- a/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
@@ -6057,6 +6057,46 @@
#endif
+// Generated from: svdf_bias_present.mod.py.
+namespace svdf_bias_present {
+// Generated svdf_bias_present test
+#include "examples/svdf_bias_present.example.cpp"
+// Generated model constructor
+#include "vts_models/svdf_bias_present.model.cpp"
+} // namespace svdf_bias_present
+
+TEST_F(NeuralnetworksHidlTest, svdf_bias_present) {
+ generated_tests::Execute(device,
+ svdf_bias_present::createTestModel,
+ svdf_bias_present::is_ignored,
+ svdf_bias_present::get_examples());
+}
+
+TEST_F(ValidationTest, svdf_bias_present) {
+ const Model model = svdf_bias_present::createTestModel();
+ const std::vector<Request> requests = createRequests(svdf_bias_present::get_examples());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ svdf_bias_present::createTestModel_dynamic_output_shape,
+ svdf_bias_present::is_ignored_dynamic_output_shape,
+ svdf_bias_present::get_examples_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, svdf_bias_present_dynamic_output_shape) {
+ const Model model = svdf_bias_present::createTestModel_dynamic_output_shape();
+ const std::vector<Request> requests = createRequests(svdf_bias_present::get_examples_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: svdf_state.mod.py.
namespace svdf_state {
// Generated svdf_state test
diff --git a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
index a04b4a6..203ded8 100644
--- a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
@@ -6881,6 +6881,46 @@
#endif
+// Generated from: svdf_bias_present_relaxed.mod.py.
+namespace svdf_bias_present_relaxed {
+// Generated svdf_bias_present_relaxed test
+#include "examples/svdf_bias_present_relaxed.example.cpp"
+// Generated model constructor
+#include "vts_models/svdf_bias_present_relaxed.model.cpp"
+} // namespace svdf_bias_present_relaxed
+
+TEST_F(NeuralnetworksHidlTest, svdf_bias_present_relaxed) {
+ generated_tests::Execute(device,
+ svdf_bias_present_relaxed::createTestModel,
+ svdf_bias_present_relaxed::is_ignored,
+ svdf_bias_present_relaxed::get_examples());
+}
+
+TEST_F(ValidationTest, svdf_bias_present_relaxed) {
+ const Model model = svdf_bias_present_relaxed::createTestModel();
+ const std::vector<Request> requests = createRequests(svdf_bias_present_relaxed::get_examples());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_relaxed_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ svdf_bias_present_relaxed::createTestModel_dynamic_output_shape,
+ svdf_bias_present_relaxed::is_ignored_dynamic_output_shape,
+ svdf_bias_present_relaxed::get_examples_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, svdf_bias_present_relaxed_dynamic_output_shape) {
+ const Model model = svdf_bias_present_relaxed::createTestModel_dynamic_output_shape();
+ const std::vector<Request> requests = createRequests(svdf_bias_present_relaxed::get_examples_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: svdf_relaxed.mod.py.
namespace svdf_relaxed {
// Generated svdf_relaxed test
diff --git a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 98f495d..a9bb624 100644
--- a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -76113,6 +76113,46 @@
#endif
+// Generated from: svdf_bias_present_float16.mod.py.
+namespace svdf_bias_present_float16 {
+// Generated svdf_bias_present_float16 test
+#include "examples/svdf_bias_present_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/svdf_bias_present_float16.model.cpp"
+} // namespace svdf_bias_present_float16
+
+TEST_F(NeuralnetworksHidlTest, svdf_bias_present_float16) {
+ generated_tests::Execute(device,
+ svdf_bias_present_float16::createTestModel,
+ svdf_bias_present_float16::is_ignored,
+ svdf_bias_present_float16::get_examples());
+}
+
+TEST_F(ValidationTest, svdf_bias_present_float16) {
+ const Model model = svdf_bias_present_float16::createTestModel();
+ const std::vector<Request> requests = createRequests(svdf_bias_present_float16::get_examples());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_float16_dynamic_output_shape) {
+ generated_tests::Execute(device,
+ svdf_bias_present_float16::createTestModel_dynamic_output_shape,
+ svdf_bias_present_float16::is_ignored_dynamic_output_shape,
+ svdf_bias_present_float16::get_examples_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, svdf_bias_present_float16_dynamic_output_shape) {
+ const Model model = svdf_bias_present_float16::createTestModel_dynamic_output_shape();
+ const std::vector<Request> requests = createRequests(svdf_bias_present_float16::get_examples_dynamic_output_shape());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+#endif
// Generated from: svdf_float16.mod.py.
namespace svdf_float16 {
// Generated svdf_float16 test
diff --git a/nn/runtime/test/generated/examples/svdf_bias_present.example.cpp b/nn/runtime/test/generated/examples/svdf_bias_present.example.cpp
new file mode 100644
index 0000000..1d119ee
--- /dev/null
+++ b/nn/runtime/test/generated/examples/svdf_bias_present.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: svdf_bias_present.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/examples/svdf_bias_present_float16.example.cpp b/nn/runtime/test/generated/examples/svdf_bias_present_float16.example.cpp
new file mode 100644
index 0000000..7934e2d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/svdf_bias_present_float16.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/examples/svdf_bias_present_relaxed.example.cpp b/nn/runtime/test/generated/examples/svdf_bias_present_relaxed.example.cpp
new file mode 100644
index 0000000..ca1ca84
--- /dev/null
+++ b/nn/runtime/test/generated/examples/svdf_bias_present_relaxed.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_relaxed.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}, {1, {4, 3}}, {2, {4, 10}}, {3, {4}}, {4, {2, 40}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.12609188f, -0.46347019f, -0.89598465f, 0.12609188f, -0.46347019f, -0.89598465f}}, {1, {-0.31930989f, -0.36118156f, 0.0079667f, 0.37613347f, 0.22197971f, 0.12416199f, 0.27901134f, 0.27557442f, 0.3905206f, -0.36137494f, -0.06634006f, -0.10640851f}}, {2, {-0.31930989f, 0.37613347f, 0.27901134f, -0.36137494f, -0.36118156f, 0.22197971f, 0.27557442f, -0.06634006f, 0.0079667f, 0.12416199f, 0.3905206f, -0.10640851f, -0.0976817f, 0.15294972f, 0.39635518f, -0.02702999f, 0.39296314f, 0.15785322f, 0.21931258f, 0.31053296f, -0.36916667f, 0.38031587f, -0.21580373f, 0.27072677f, 0.23622236f, 0.34936687f, 0.18174365f, 0.35907319f, -0.17493086f, 0.324846f, -0.10781813f, 0.27201805f, 0.14324132f, -0.23681851f, -0.27115166f, -0.01580888f, -0.14943552f, 0.15465137f, 0.09784451f, -0.0337657f}}, {3, {1.0f, 2.0f, 3.0f, 4.0f}}, {4, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 40}}, {1, {2, 4}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {1.014899f, 1.9482339f, 2.856275f, 3.99728117f, 1.014899f, 1.9482339f, 2.856275f, 3.99728117f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/models/svdf_bias_present.model.cpp b/nn/runtime/test/generated/models/svdf_bias_present.model.cpp
new file mode 100644
index 0000000..9392833
--- /dev/null
+++ b/nn/runtime/test/generated/models/svdf_bias_present.model.cpp
@@ -0,0 +1,74 @@
+// clang-format off
+// Generated file (from: svdf_bias_present.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT32, {4});
+ OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type6(Type::TENSOR_FLOAT32, {2, 4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type4);
+ auto output = model->addOperand(&type6);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT32, {4});
+ OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type7);
+ auto output = model->addOperand(&type7);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/svdf_bias_present_float16.model.cpp b/nn/runtime/test/generated/models/svdf_bias_present_float16.model.cpp
new file mode 100644
index 0000000..30a2c9d
--- /dev/null
+++ b/nn/runtime/test/generated/models/svdf_bias_present_float16.model.cpp
@@ -0,0 +1,74 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT16, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT16, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT16, {4});
+ OperandType type4(Type::TENSOR_FLOAT16, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type6(Type::TENSOR_FLOAT16, {2, 4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type4);
+ auto output = model->addOperand(&type6);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT16, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT16, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT16, {4});
+ OperandType type4(Type::TENSOR_FLOAT16, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type7(Type::TENSOR_FLOAT16, {0, 0});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type7);
+ auto output = model->addOperand(&type7);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/svdf_bias_present_relaxed.model.cpp b/nn/runtime/test/generated/models/svdf_bias_present_relaxed.model.cpp
new file mode 100644
index 0000000..6ec9810
--- /dev/null
+++ b/nn/runtime/test/generated/models/svdf_bias_present_relaxed.model.cpp
@@ -0,0 +1,78 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_relaxed.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT32, {4});
+ OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type6(Type::TENSOR_FLOAT32, {2, 4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type4);
+ auto output = model->addOperand(&type6);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
+ OperandType type3(Type::TENSOR_FLOAT32, {4});
+ OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
+ OperandType type5(Type::INT32, {});
+ OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto weights_feature = model->addOperand(&type1);
+ auto weights_time = model->addOperand(&type2);
+ auto bias = model->addOperand(&type3);
+ auto state_in = model->addOperand(&type4);
+ auto rank_param = model->addOperand(&type5);
+ auto activation_param = model->addOperand(&type5);
+ auto state_out = model->addOperand(&type7);
+ auto output = model->addOperand(&type7);
+ // Phase 2, operations
+ static int32_t rank_param_init[] = {1};
+ model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
+ static int32_t activation_param_init[] = {0};
+ model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input, weights_feature, weights_time, bias, state_in},
+ {state_out, output});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/tests/svdf_bias_present.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_bias_present.mod.py.cpp
new file mode 100644
index 0000000..4236ee8
--- /dev/null
+++ b/nn/runtime/test/generated/tests/svdf_bias_present.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: svdf_bias_present.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace svdf_bias_present {
+// Generated svdf_bias_present test
+#include "generated/examples/svdf_bias_present.example.cpp"
+// Generated model constructor
+#include "generated/models/svdf_bias_present.model.cpp"
+} // namespace svdf_bias_present
+
+TEST_F(GeneratedTests, svdf_bias_present) {
+ execute(svdf_bias_present::CreateModel,
+ svdf_bias_present::is_ignored,
+ svdf_bias_present::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_dynamic_output_shape) {
+ execute(svdf_bias_present::CreateModel_dynamic_output_shape,
+ svdf_bias_present::is_ignored_dynamic_output_shape,
+ svdf_bias_present::get_examples_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/tests/svdf_bias_present_float16.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_bias_present_float16.mod.py.cpp
new file mode 100644
index 0000000..3b94f84
--- /dev/null
+++ b/nn/runtime/test/generated/tests/svdf_bias_present_float16.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace svdf_bias_present_float16 {
+// Generated svdf_bias_present_float16 test
+#include "generated/examples/svdf_bias_present_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/svdf_bias_present_float16.model.cpp"
+} // namespace svdf_bias_present_float16
+
+TEST_F(GeneratedTests, svdf_bias_present_float16) {
+ execute(svdf_bias_present_float16::CreateModel,
+ svdf_bias_present_float16::is_ignored,
+ svdf_bias_present_float16::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_float16_dynamic_output_shape) {
+ execute(svdf_bias_present_float16::CreateModel_dynamic_output_shape,
+ svdf_bias_present_float16::is_ignored_dynamic_output_shape,
+ svdf_bias_present_float16::get_examples_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/tests/svdf_bias_present_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_bias_present_relaxed.mod.py.cpp
new file mode 100644
index 0000000..5c566d8
--- /dev/null
+++ b/nn/runtime/test/generated/tests/svdf_bias_present_relaxed.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_relaxed.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace svdf_bias_present_relaxed {
+// Generated svdf_bias_present_relaxed test
+#include "generated/examples/svdf_bias_present_relaxed.example.cpp"
+// Generated model constructor
+#include "generated/models/svdf_bias_present_relaxed.model.cpp"
+} // namespace svdf_bias_present_relaxed
+
+TEST_F(GeneratedTests, svdf_bias_present_relaxed) {
+ execute(svdf_bias_present_relaxed::CreateModel,
+ svdf_bias_present_relaxed::is_ignored,
+ svdf_bias_present_relaxed::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, svdf_bias_present_relaxed_dynamic_output_shape) {
+ execute(svdf_bias_present_relaxed::CreateModel_dynamic_output_shape,
+ svdf_bias_present_relaxed::is_ignored_dynamic_output_shape,
+ svdf_bias_present_relaxed::get_examples_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/vts_models/svdf_bias_present.model.cpp b/nn/runtime/test/generated/vts_models/svdf_bias_present.model.cpp
new file mode 100644
index 0000000..5317d54
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/svdf_bias_present.model.cpp
@@ -0,0 +1,234 @@
+// clang-format off
+// Generated file (from: svdf_bias_present.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/svdf_bias_present_float16.model.cpp b/nn/runtime/test/generated/vts_models/svdf_bias_present_float16.model.cpp
new file mode 100644
index 0000000..bfda48c
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/svdf_bias_present_float16.model.cpp
@@ -0,0 +1,234 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_float16.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/svdf_bias_present_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/svdf_bias_present_relaxed.model.cpp
new file mode 100644
index 0000000..9e7d398
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/svdf_bias_present_relaxed.model.cpp
@@ -0,0 +1,236 @@
+// clang-format off
+// Generated file (from: svdf_bias_present_relaxed.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 4},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 40},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 4, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {0, 0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::SVDF,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7, 8},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4};
+ const std::vector<uint32_t> outputIndexes = {7, 8};
+ std::vector<uint8_t> operandValues = {
+ 1, 0, 0, 0, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {0};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/specs/V1_0/svdf_bias_present.mod.py b/nn/runtime/test/specs/V1_0/svdf_bias_present.mod.py
new file mode 100644
index 0000000..ae7d1e7
--- /dev/null
+++ b/nn/runtime/test/specs/V1_0/svdf_bias_present.mod.py
@@ -0,0 +1,138 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# SVDF test with an explicitly non-zero bias vector.  This exercises the
+# bias-add path of the op; note the golden outputs sit close to the bias
+# values [1, 2, 3, 4].
+batches = 2
+features = 4
+rank = 1
+units = int(features / rank)
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+# Operands in SVDF signature order: input, weights_feature, weights_time,
+# bias, state_in, rank, activation.
+input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT32", "{%d, %d}" % (features, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT32", "{%d, %d}" % (features, memory_size))
+bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
+rank_param = Int32Scalar("rank_param", rank)
+# 0 — presumably no fused activation (NNAPI FuseCode NONE); confirm.
+activation_param = Int32Scalar("activation_param", 0)
+# State output values are not compared against golden data.
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+
+input0 = {
+ input: [],
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ # Non-zero bias is the point of this test.
+ bias: [1.0, 2.0, 3.0, 4.0],
+ state_in: [0 for _ in range(batches * memory_size * features)],
+}
+
+# Ten time steps of input; both batches receive identical rows.
+test_inputs = [
+ 0.12609188, -0.46347019, -0.89598465,
+ 0.12609188, -0.46347019, -0.89598465,
+
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+
+ 0.49837467, 0.19278903, 0.26584083,
+ 0.49837467, 0.19278903, 0.26584083,
+
+ -0.11186574, 0.13164264, -0.05349274,
+ -0.11186574, 0.13164264, -0.05349274,
+
+ -0.68892461, 0.37783599, 0.18263303,
+ -0.68892461, 0.37783599, 0.18263303,
+
+ -0.81299269, -0.86831826, 1.43940818,
+ -0.81299269, -0.86831826, 1.43940818,
+
+ -1.45006323, -0.82251364, -1.69082689,
+ -1.45006323, -0.82251364, -1.69082689,
+
+ 0.03966608, -0.24936394, -0.77526885,
+ 0.03966608, -0.24936394, -0.77526885,
+
+ 0.11771342, -0.23761693, -0.65898693,
+ 0.11771342, -0.23761693, -0.65898693,
+
+ -0.89477462, 1.67204106, -0.53235275,
+ -0.89477462, 1.67204106, -0.53235275
+]
+
+# Expected outputs per step (both batches identical).
+golden_outputs = [
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+
+ 1.419114, 1.762176, 2.577373, 4.175115,
+ 1.419114, 1.762176, 2.577373, 4.175115,
+
+ 1.36726, 1.477697, 2.543498, 3.824525,
+ 1.36726, 1.477697, 2.543498, 3.824525
+]
+
+output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
+ output: []}
+
+# Emit one Example per time step; only step 0 is enabled for now (see TODO).
+# TODO: enable more data points after fixing the reference issue
+for i in range(1):
+ batch_start = i * input_size * batches
+ batch_end = batch_start + input_size * batches
+ input0[input] = test_inputs[batch_start:batch_end]
+ golden_start = i * units * batches
+ golden_end = golden_start + units * batches
+ output0[output] = golden_outputs[golden_start:golden_end]
+ Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_1/svdf_bias_present_relaxed.mod.py b/nn/runtime/test/specs/V1_1/svdf_bias_present_relaxed.mod.py
new file mode 100644
index 0000000..7bff435
--- /dev/null
+++ b/nn/runtime/test/specs/V1_1/svdf_bias_present_relaxed.mod.py
@@ -0,0 +1,139 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Relaxed-precision variant of the SVDF-with-non-zero-bias test: identical
+# model and data, but RelaxedExecution(True) allows FP32 computation to be
+# carried out with FP16-range precision.
+batches = 2
+features = 4
+rank = 1
+units = int(features / rank)
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+# Operands in SVDF signature order: input, weights_feature, weights_time,
+# bias, state_in, rank, activation.
+input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT32", "{%d, %d}" % (features, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT32", "{%d, %d}" % (features, memory_size))
+bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
+rank_param = Int32Scalar("rank_param", rank)
+# 0 — presumably no fused activation (NNAPI FuseCode NONE); confirm.
+activation_param = Int32Scalar("activation_param", 0)
+# State output values are not compared against golden data.
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+model = model.RelaxedExecution(True)
+
+input0 = {
+ input: [],
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ # Non-zero bias is the point of this test.
+ bias: [1.0, 2.0, 3.0, 4.0],
+ state_in: [0 for _ in range(batches * memory_size * features)],
+}
+
+# Ten time steps of input; both batches receive identical rows.
+test_inputs = [
+ 0.12609188, -0.46347019, -0.89598465,
+ 0.12609188, -0.46347019, -0.89598465,
+
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+
+ 0.49837467, 0.19278903, 0.26584083,
+ 0.49837467, 0.19278903, 0.26584083,
+
+ -0.11186574, 0.13164264, -0.05349274,
+ -0.11186574, 0.13164264, -0.05349274,
+
+ -0.68892461, 0.37783599, 0.18263303,
+ -0.68892461, 0.37783599, 0.18263303,
+
+ -0.81299269, -0.86831826, 1.43940818,
+ -0.81299269, -0.86831826, 1.43940818,
+
+ -1.45006323, -0.82251364, -1.69082689,
+ -1.45006323, -0.82251364, -1.69082689,
+
+ 0.03966608, -0.24936394, -0.77526885,
+ 0.03966608, -0.24936394, -0.77526885,
+
+ 0.11771342, -0.23761693, -0.65898693,
+ 0.11771342, -0.23761693, -0.65898693,
+
+ -0.89477462, 1.67204106, -0.53235275,
+ -0.89477462, 1.67204106, -0.53235275
+]
+
+# Expected outputs per step (both batches identical).
+golden_outputs = [
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+
+ 1.419114, 1.762176, 2.577373, 4.175115,
+ 1.419114, 1.762176, 2.577373, 4.175115,
+
+ 1.36726, 1.477697, 2.543498, 3.824525,
+ 1.36726, 1.477697, 2.543498, 3.824525
+]
+
+output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
+ output: []}
+
+# Emit one Example per time step; only step 0 is enabled for now (see TODO).
+# TODO: enable more data points after fixing the reference issue
+for i in range(1):
+ batch_start = i * input_size * batches
+ batch_end = batch_start + input_size * batches
+ input0[input] = test_inputs[batch_start:batch_end]
+ golden_start = i * units * batches
+ golden_end = golden_start + units * batches
+ output0[output] = golden_outputs[golden_start:golden_end]
+ Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_2/svdf_bias_present_float16.mod.py b/nn/runtime/test/specs/V1_2/svdf_bias_present_float16.mod.py
new file mode 100644
index 0000000..4dc6914
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/svdf_bias_present_float16.mod.py
@@ -0,0 +1,138 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# FLOAT16 variant of the SVDF-with-non-zero-bias test: same model and data
+# as svdf_bias_present.mod.py, but every tensor operand is TENSOR_FLOAT16.
+batches = 2
+features = 4
+rank = 1
+units = int(features / rank)
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+# Operands in SVDF signature order: input, weights_feature, weights_time,
+# bias, state_in, rank, activation.
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT16", "{%d, %d}" % (features, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT16", "{%d, %d}" % (features, memory_size))
+bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+rank_param = Int32Scalar("rank_param", rank)
+# 0 — presumably no fused activation (NNAPI FuseCode NONE); confirm.
+activation_param = Int32Scalar("activation_param", 0)
+# State output values are not compared against golden data.
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+
+input0 = {
+ input: [],
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ # Non-zero bias is the point of this test.
+ bias: [1.0, 2.0, 3.0, 4.0],
+ state_in: [0 for _ in range(batches * memory_size * features)],
+}
+
+# Ten time steps of input; both batches receive identical rows.
+test_inputs = [
+ 0.12609188, -0.46347019, -0.89598465,
+ 0.12609188, -0.46347019, -0.89598465,
+
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+
+ 0.49837467, 0.19278903, 0.26584083,
+ 0.49837467, 0.19278903, 0.26584083,
+
+ -0.11186574, 0.13164264, -0.05349274,
+ -0.11186574, 0.13164264, -0.05349274,
+
+ -0.68892461, 0.37783599, 0.18263303,
+ -0.68892461, 0.37783599, 0.18263303,
+
+ -0.81299269, -0.86831826, 1.43940818,
+ -0.81299269, -0.86831826, 1.43940818,
+
+ -1.45006323, -0.82251364, -1.69082689,
+ -1.45006323, -0.82251364, -1.69082689,
+
+ 0.03966608, -0.24936394, -0.77526885,
+ 0.03966608, -0.24936394, -0.77526885,
+
+ 0.11771342, -0.23761693, -0.65898693,
+ 0.11771342, -0.23761693, -0.65898693,
+
+ -0.89477462, 1.67204106, -0.53235275,
+ -0.89477462, 1.67204106, -0.53235275
+]
+
+# Expected outputs per step (both batches identical).
+golden_outputs = [
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+
+ 1.419114, 1.762176, 2.577373, 4.175115,
+ 1.419114, 1.762176, 2.577373, 4.175115,
+
+ 1.36726, 1.477697, 2.543498, 3.824525,
+ 1.36726, 1.477697, 2.543498, 3.824525
+]
+
+output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
+ output: []}
+
+# Emit one Example per time step; only step 0 is enabled for now (see TODO).
+# TODO: enable more data points after fixing the reference issue
+for i in range(1):
+ batch_start = i * input_size * batches
+ batch_end = batch_start + input_size * batches
+ input0[input] = test_inputs[batch_start:batch_end]
+ golden_start = i * units * batches
+ golden_end = golden_start + units * batches
+ output0[output] = golden_outputs[golden_start:golden_end]
+ Example((input0, output0))