Re-generate CTS tests from test specs

Bug: 63905942
Test: NeuralNetworksTest on Angler
Change-Id: I002efa72d08282ec1a9fa7625f2df4a36dd16fb2
diff --git a/nn/runtime/test/generated/all_generated_tests.cpp b/nn/runtime/test/generated/all_generated_tests.cpp
index 416114d..8e79e55 100644
--- a/nn/runtime/test/generated/all_generated_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_tests.cpp
@@ -99,6 +99,62 @@
             conv_quant8::examples);
 }
 
+namespace depth_to_space_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated depth_to_space_float_1 test
+#include "generated/examples/depth_to_space_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depth_to_space_float_1.model.cpp"
+} // namespace depth_to_space_float_1
+TEST_F(GeneratedTests, depth_to_space_float_1) {
+    Execute(depth_to_space_float_1::CreateModel,
+            depth_to_space_float_1::is_ignored,
+            depth_to_space_float_1::examples);
+}
+
+namespace depth_to_space_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated depth_to_space_float_2 test
+#include "generated/examples/depth_to_space_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depth_to_space_float_2.model.cpp"
+} // namespace depth_to_space_float_2
+TEST_F(GeneratedTests, depth_to_space_float_2) {
+    Execute(depth_to_space_float_2::CreateModel,
+            depth_to_space_float_2::is_ignored,
+            depth_to_space_float_2::examples);
+}
+
+namespace depth_to_space_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated depth_to_space_quant8_1 test
+#include "generated/examples/depth_to_space_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depth_to_space_quant8_1.model.cpp"
+} // namespace depth_to_space_quant8_1
+TEST_F(GeneratedTests, depth_to_space_quant8_1) {
+    Execute(depth_to_space_quant8_1::CreateModel,
+            depth_to_space_quant8_1::is_ignored,
+            depth_to_space_quant8_1::examples);
+}
+
+namespace depth_to_space_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated depth_to_space_quant8_2 test
+#include "generated/examples/depth_to_space_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depth_to_space_quant8_2.model.cpp"
+} // namespace depth_to_space_quant8_2
+TEST_F(GeneratedTests, depth_to_space_quant8_2) {
+    Execute(depth_to_space_quant8_2::CreateModel,
+            depth_to_space_quant8_2::is_ignored,
+            depth_to_space_quant8_2::examples);
+}
+
 namespace depthwise_conv_2d {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv_2d test
@@ -169,6 +225,34 @@
             floor::examples);
 }
 
+namespace fully_connected_float {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float test
+#include "generated/examples/fully_connected_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float.model.cpp"
+} // namespace fully_connected_float
+TEST_F(GeneratedTests, fully_connected_float) {
+    Execute(fully_connected_float::CreateModel,
+            fully_connected_float::is_ignored,
+            fully_connected_float::examples);
+}
+
+namespace fully_connected_quant8 {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_quant8 test
+#include "generated/examples/fully_connected_quant8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_quant8.model.cpp"
+} // namespace fully_connected_quant8
+TEST_F(GeneratedTests, fully_connected_quant8) {
+    Execute(fully_connected_quant8::CreateModel,
+            fully_connected_quant8::is_ignored,
+            fully_connected_quant8::examples);
+}
+
 namespace hashtable_lookup_float {
 std::vector<MixedTypedExample> examples = {
 // Generated hashtable_lookup_float test
@@ -225,6 +309,62 @@
             l2_pool_float::examples);
 }
 
+namespace local_response_norm_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated local_response_norm_float_1 test
+#include "generated/examples/local_response_norm_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/local_response_norm_float_1.model.cpp"
+} // namespace local_response_norm_float_1
+TEST_F(GeneratedTests, local_response_norm_float_1) {
+    Execute(local_response_norm_float_1::CreateModel,
+            local_response_norm_float_1::is_ignored,
+            local_response_norm_float_1::examples);
+}
+
+namespace local_response_norm_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated local_response_norm_float_2 test
+#include "generated/examples/local_response_norm_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/local_response_norm_float_2.model.cpp"
+} // namespace local_response_norm_float_2
+TEST_F(GeneratedTests, local_response_norm_float_2) {
+    Execute(local_response_norm_float_2::CreateModel,
+            local_response_norm_float_2::is_ignored,
+            local_response_norm_float_2::examples);
+}
+
+namespace local_response_norm_float_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated local_response_norm_float_3 test
+#include "generated/examples/local_response_norm_float_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/local_response_norm_float_3.model.cpp"
+} // namespace local_response_norm_float_3
+TEST_F(GeneratedTests, local_response_norm_float_3) {
+    Execute(local_response_norm_float_3::CreateModel,
+            local_response_norm_float_3::is_ignored,
+            local_response_norm_float_3::examples);
+}
+
+namespace local_response_norm_float_4 {
+std::vector<MixedTypedExample> examples = {
+// Generated local_response_norm_float_4 test
+#include "generated/examples/local_response_norm_float_4.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/local_response_norm_float_4.model.cpp"
+} // namespace local_response_norm_float_4
+TEST_F(GeneratedTests, local_response_norm_float_4) {
+    Execute(local_response_norm_float_4::CreateModel,
+            local_response_norm_float_4::is_ignored,
+            local_response_norm_float_4::examples);
+}
+
 namespace lsh_projection {
 std::vector<MixedTypedExample> examples = {
 // Generated lsh_projection test
@@ -379,6 +519,20 @@
             relu6_float::examples);
 }
 
+namespace relu6_quant8 {
+std::vector<MixedTypedExample> examples = {
+// Generated relu6_quant8 test
+#include "generated/examples/relu6_quant8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/relu6_quant8.model.cpp"
+} // namespace relu6_quant8
+TEST_F(GeneratedTests, relu6_quant8) {
+    Execute(relu6_quant8::CreateModel,
+            relu6_quant8::is_ignored,
+            relu6_quant8::examples);
+}
+
 namespace relu_float {
 std::vector<MixedTypedExample> examples = {
 // Generated relu_float test
@@ -463,6 +617,146 @@
             rnn::examples);
 }
 
+namespace sigmoid_float {
+std::vector<MixedTypedExample> examples = {
+// Generated sigmoid_float test
+#include "generated/examples/sigmoid_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/sigmoid_float.model.cpp"
+} // namespace sigmoid_float
+TEST_F(GeneratedTests, sigmoid_float) {
+    Execute(sigmoid_float::CreateModel,
+            sigmoid_float::is_ignored,
+            sigmoid_float::examples);
+}
+
+namespace sigmoid_quant8 {
+std::vector<MixedTypedExample> examples = {
+// Generated sigmoid_quant8 test
+#include "generated/examples/sigmoid_quant8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/sigmoid_quant8.model.cpp"
+} // namespace sigmoid_quant8
+TEST_F(GeneratedTests, sigmoid_quant8) {
+    Execute(sigmoid_quant8::CreateModel,
+            sigmoid_quant8::is_ignored,
+            sigmoid_quant8::examples);
+}
+
+namespace softmax_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated softmax_float_1 test
+#include "generated/examples/softmax_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/softmax_float_1.model.cpp"
+} // namespace softmax_float_1
+TEST_F(GeneratedTests, softmax_float_1) {
+    Execute(softmax_float_1::CreateModel,
+            softmax_float_1::is_ignored,
+            softmax_float_1::examples);
+}
+
+namespace softmax_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated softmax_float_2 test
+#include "generated/examples/softmax_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/softmax_float_2.model.cpp"
+} // namespace softmax_float_2
+TEST_F(GeneratedTests, softmax_float_2) {
+    Execute(softmax_float_2::CreateModel,
+            softmax_float_2::is_ignored,
+            softmax_float_2::examples);
+}
+
+namespace softmax_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated softmax_quant8_1 test
+#include "generated/examples/softmax_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/softmax_quant8_1.model.cpp"
+} // namespace softmax_quant8_1
+TEST_F(GeneratedTests, softmax_quant8_1) {
+    Execute(softmax_quant8_1::CreateModel,
+            softmax_quant8_1::is_ignored,
+            softmax_quant8_1::examples);
+}
+
+namespace softmax_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated softmax_quant8_2 test
+#include "generated/examples/softmax_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/softmax_quant8_2.model.cpp"
+} // namespace softmax_quant8_2
+TEST_F(GeneratedTests, softmax_quant8_2) {
+    Execute(softmax_quant8_2::CreateModel,
+            softmax_quant8_2::is_ignored,
+            softmax_quant8_2::examples);
+}
+
+namespace space_to_depth_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_depth_float_1 test
+#include "generated/examples/space_to_depth_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_depth_float_1.model.cpp"
+} // namespace space_to_depth_float_1
+TEST_F(GeneratedTests, space_to_depth_float_1) {
+    Execute(space_to_depth_float_1::CreateModel,
+            space_to_depth_float_1::is_ignored,
+            space_to_depth_float_1::examples);
+}
+
+namespace space_to_depth_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_depth_float_2 test
+#include "generated/examples/space_to_depth_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_depth_float_2.model.cpp"
+} // namespace space_to_depth_float_2
+TEST_F(GeneratedTests, space_to_depth_float_2) {
+    Execute(space_to_depth_float_2::CreateModel,
+            space_to_depth_float_2::is_ignored,
+            space_to_depth_float_2::examples);
+}
+
+namespace space_to_depth_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_depth_quant8_1 test
+#include "generated/examples/space_to_depth_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_depth_quant8_1.model.cpp"
+} // namespace space_to_depth_quant8_1
+TEST_F(GeneratedTests, space_to_depth_quant8_1) {
+    Execute(space_to_depth_quant8_1::CreateModel,
+            space_to_depth_quant8_1::is_ignored,
+            space_to_depth_quant8_1::examples);
+}
+
+namespace space_to_depth_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_depth_quant8_2 test
+#include "generated/examples/space_to_depth_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_depth_quant8_2.model.cpp"
+} // namespace space_to_depth_quant8_2
+TEST_F(GeneratedTests, space_to_depth_quant8_2) {
+    Execute(space_to_depth_quant8_2::CreateModel,
+            space_to_depth_quant8_2::is_ignored,
+            space_to_depth_quant8_2::examples);
+}
+
 namespace svdf {
 std::vector<MixedTypedExample> examples = {
 // Generated svdf test
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp
new file mode 100644
index 0000000..57b2057
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depth_to_space_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp
new file mode 100644
index 0000000..2b3e251
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depth_to_space_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, 11.0f, 12.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp
new file mode 100644
index 0000000..a292e96
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depth_to_space_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 252, 253, 254, 255}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 252, 253, 254, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp
new file mode 100644
index 0000000..849243c
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depth_to_space_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 4, 5, 2, 3, 6, 7, 248, 249, 252, 253, 250, 251, 254, 255}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 2, 3, 4, 5, 6, 7, 248, 249, 250, 251, 252, 253, 254, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_float.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
new file mode 100644
index 0000000..c1167c2
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2, 32, 16}}, {1, {2}}, {2, {4}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8, 68, 36}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
new file mode 100644
index 0000000..e2a2a3d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_quant8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 32, 16}}, {1, {2}}, {2, {4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 17, 9}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp
new file mode 100644
index 0000000..25f7590
--- /dev/null
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: local_response_norm_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.1f, 0.6f, 0.7f, 1.2f, -0.7f, 0.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-0.22f, 0.12f, 0.14f, 0.24f, -0.14f, 0.02f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp
new file mode 100644
index 0000000..bef5e6b
--- /dev/null
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: local_response_norm_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.1f, 0.6f, 0.7f, 1.2f, -0.7f, 0.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-0.55f, 0.3f, 0.35f, 0.6f, -0.35f, 0.05f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp
new file mode 100644
index 0000000..210d6e8
--- /dev/null
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: local_response_norm_float_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.1f, 0.6f, 0.7f, 1.2f, -0.7f, 0.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-0.275f, 0.15f, 0.175f, 0.3f, -0.175f, 0.025f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp
new file mode 100644
index 0000000..6cdb818
--- /dev/null
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: local_response_norm_float_4.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.1f, 0.6f, 0.7f, 1.2f, -0.7f, 0.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-0.26492569f, 0.12510864f, 0.14011213f, 0.26726127f, -0.16178755f, 0.0244266f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/relu6_quant8.example.cpp b/nn/runtime/test/generated/examples/relu6_quant8.example.cpp
new file mode 100644
index 0000000..18105fb
--- /dev/null
+++ b/nn/runtime/test/generated/examples/relu6_quant8.example.cpp
@@ -0,0 +1,43 @@
+// Generated file (from: relu6_quant8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 11, 12}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 11, 12}}}
+}
+}, // End of an example
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {13, 14, 254, 255}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {12, 12, 12, 12}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/sigmoid_float.example.cpp b/nn/runtime/test/generated/examples/sigmoid_float.example.cpp
new file mode 100644
index 0000000..bf49a95
--- /dev/null
+++ b/nn/runtime/test/generated/examples/sigmoid_float.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: sigmoid_float.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 8.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.7310585975646973f, 0.8807970285415649f, 0.9820137619972229f, 0.9996646642684937f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/sigmoid_quant8.example.cpp b/nn/runtime/test/generated/examples/sigmoid_quant8.example.cpp
new file mode 100644
index 0000000..3957148
--- /dev/null
+++ b/nn/runtime/test/generated/examples/sigmoid_quant8.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: sigmoid_quant8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 2, 127}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {128, 159, 187, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/softmax_float_1.example.cpp b/nn/runtime/test/generated/examples/softmax_float_1.example.cpp
new file mode 100644
index 0000000..d2cf06a
--- /dev/null
+++ b/nn/runtime/test/generated/examples/softmax_float_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: softmax_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 10.0f, 20.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.25f, 0.25f, 0.25f, 0.25f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/softmax_float_2.example.cpp b/nn/runtime/test/generated/examples/softmax_float_2.example.cpp
new file mode 100644
index 0000000..deba11a
--- /dev/null
+++ b/nn/runtime/test/generated/examples/softmax_float_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: softmax_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp b/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp
new file mode 100644
index 0000000..6d8398e
--- /dev/null
+++ b/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: softmax_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 10, 20}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 0, 0, 0}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp b/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp
new file mode 100644
index 0000000..0116a0f
--- /dev/null
+++ b/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: softmax_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 255, 254, 253, 252, 251}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp
new file mode 100644
index 0000000..d76f8e7
--- /dev/null
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_depth_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp
new file mode 100644
index 0000000..452df7a
--- /dev/null
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_depth_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, 11.0f, 12.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp
new file mode 100644
index 0000000..5c82f5d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_depth_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 252, 253, 254, 255}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 252, 253, 254, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp
new file mode 100644
index 0000000..9cc30e7
--- /dev/null
+++ b/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 2, 3, 4, 5, 6, 7, 248, 249, 250, 251, 252, 253, 254, 255}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 4, 5, 2, 3, 6, 7, 248, 249, 252, 253, 250, 251, 254, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/models/depth_to_space_float_1.model.cpp b/nn/runtime/test/generated/models/depth_to_space_float_1.model.cpp
new file mode 100644
index 0000000..800d130
--- /dev/null
+++ b/nn/runtime/test/generated/models/depth_to_space_float_1.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: depth_to_space_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 8});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depth_to_space_float_2.model.cpp b/nn/runtime/test/generated/models/depth_to_space_float_2.model.cpp
new file mode 100644
index 0000000..edba9f5
--- /dev/null
+++ b/nn/runtime/test/generated/models/depth_to_space_float_2.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: depth_to_space_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // DEPTH_TO_SPACE, FLOAT32: {1, 2, 2, 4} -> {1, 4, 4, 1}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 4});  // input
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});  // output
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
new file mode 100644
index 0000000..53aa350
--- /dev/null
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: depth_to_space_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // DEPTH_TO_SPACE, QUANT8_ASYMM: {1, 1, 1, 8} -> {1, 2, 2, 2}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 1, 1, 8});  // input
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 2, 2, 2});  // output: quant params added to match input — DEPTH_TO_SPACE only rearranges data, so output scale/zeroPoint must equal the input's
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
new file mode 100644
index 0000000..a37459a
--- /dev/null
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: depth_to_space_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // DEPTH_TO_SPACE, QUANT8_ASYMM: {1, 2, 2, 4} -> {1, 4, 4, 1}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});  // input — NOTE(review): no quant range given, unlike depth_to_space_quant8_1; confirm the spec intends default scale/zeroPoint
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});  // output — NOTE(review): same missing quant range; must match input's quantization for this op
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_float.model.cpp b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
new file mode 100644
index 0000000..4170687
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_float.mod.py). Do not edit
+void CreateModel(Model *model) {  // FULLY_CONNECTED, FLOAT32: input {3}, weights {1, 1}, bias {1}
+  OperandType type3(Type::INT32, {});  // scalar: fused activation code
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 1});  // weights
+  OperandType type2(Type::TENSOR_FLOAT32, {1});  // bias
+  OperandType type0(Type::TENSOR_FLOAT32, {3});  // input and output — NOTE(review): rank-1 input presumably treated as 3 batches of size 1; confirm against the spec
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type0);
+  auto act = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};  // 0 = ANEURALNETWORKS_FUSED_NONE (no fused activation)
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
new file mode 100644
index 0000000..2b2e8a5
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
@@ -0,0 +1,28 @@
+// Generated file (from: fully_connected_quant8.mod.py). Do not edit
+void CreateModel(Model *model) {  // FULLY_CONNECTED, QUANT8_ASYMM: input {3}, weights {1, 1}, bias {1}
+  OperandType type4(Type::INT32, {});  // scalar: fused activation code
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 1});  // weights
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {3});  // input
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, 0.0f, 255.0f, {3});  // output (fixed spacing: was "255.0f,{3}", inconsistent with every sibling line)
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, 0.0f, 63.75f, {1});  // bias — NOTE(review): NNAPI specifies TENSOR_INT32 bias for quant8 FULLY_CONNECTED; confirm the harness conversion
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  auto act = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};  // 0 = ANEURALNETWORKS_FUSED_NONE (no fused activation)
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/local_response_norm_float_1.model.cpp b/nn/runtime/test/generated/models/local_response_norm_float_1.model.cpp
new file mode 100644
index 0000000..462884a
--- /dev/null
+++ b/nn/runtime/test/generated/models/local_response_norm_float_1.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: local_response_norm_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOCAL_RESPONSE_NORMALIZATION, FLOAT32 {1, 1, 1, 6}
+  OperandType type2(Type::FLOAT32, {});  // scalar float: bias / alpha / beta
+  OperandType type1(Type::INT32, {});  // scalar int: radius
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 6});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);
+  auto bias = model->addOperand(&type2);
+  auto alpha = model->addOperand(&type2);
+  auto beta = model->addOperand(&type2);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t radius_init[] = {20};  // radius 20 (covers the whole 6-wide depth axis)
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  static float bias_init[] = {9.0f};  // bias 9.0
+  model->setOperandValue(bias, bias_init, sizeof(float) * 1);
+  static float alpha_init[] = {4.0f};  // alpha 4.0
+  model->setOperandValue(alpha, alpha_init, sizeof(float) * 1);
+  static float beta_init[] = {0.5f};  // beta 0.5
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION, {input, radius, bias, alpha, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/local_response_norm_float_2.model.cpp b/nn/runtime/test/generated/models/local_response_norm_float_2.model.cpp
new file mode 100644
index 0000000..fa5e133
--- /dev/null
+++ b/nn/runtime/test/generated/models/local_response_norm_float_2.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: local_response_norm_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOCAL_RESPONSE_NORMALIZATION, FLOAT32 {1, 1, 1, 6}; differs from _1 only in bias/alpha
+  OperandType type2(Type::FLOAT32, {});  // scalar float: bias / alpha / beta
+  OperandType type1(Type::INT32, {});  // scalar int: radius
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 6});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);
+  auto bias = model->addOperand(&type2);
+  auto alpha = model->addOperand(&type2);
+  auto beta = model->addOperand(&type2);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t radius_init[] = {20};  // radius 20
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  static float bias_init[] = {0.0f};  // bias 0.0
+  model->setOperandValue(bias, bias_init, sizeof(float) * 1);
+  static float alpha_init[] = {1.0f};  // alpha 1.0
+  model->setOperandValue(alpha, alpha_init, sizeof(float) * 1);
+  static float beta_init[] = {0.5f};  // beta 0.5
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION, {input, radius, bias, alpha, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/local_response_norm_float_3.model.cpp b/nn/runtime/test/generated/models/local_response_norm_float_3.model.cpp
new file mode 100644
index 0000000..b73099f
--- /dev/null
+++ b/nn/runtime/test/generated/models/local_response_norm_float_3.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: local_response_norm_float_3.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOCAL_RESPONSE_NORMALIZATION, FLOAT32 {1, 1, 1, 6}; differs from _1 only in bias
+  OperandType type2(Type::FLOAT32, {});  // scalar float: bias / alpha / beta
+  OperandType type1(Type::INT32, {});  // scalar int: radius
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 6});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);
+  auto bias = model->addOperand(&type2);
+  auto alpha = model->addOperand(&type2);
+  auto beta = model->addOperand(&type2);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t radius_init[] = {20};  // radius 20
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  static float bias_init[] = {0.0f};  // bias 0.0
+  model->setOperandValue(bias, bias_init, sizeof(float) * 1);
+  static float alpha_init[] = {4.0f};  // alpha 4.0
+  model->setOperandValue(alpha, alpha_init, sizeof(float) * 1);
+  static float beta_init[] = {0.5f};  // beta 0.5
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION, {input, radius, bias, alpha, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/local_response_norm_float_4.model.cpp b/nn/runtime/test/generated/models/local_response_norm_float_4.model.cpp
new file mode 100644
index 0000000..4b1a109
--- /dev/null
+++ b/nn/runtime/test/generated/models/local_response_norm_float_4.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: local_response_norm_float_4.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOCAL_RESPONSE_NORMALIZATION, FLOAT32 {1, 1, 1, 6}; differs from _1 only in radius
+  OperandType type2(Type::FLOAT32, {});  // scalar float: bias / alpha / beta
+  OperandType type1(Type::INT32, {});  // scalar int: radius
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 6});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);
+  auto bias = model->addOperand(&type2);
+  auto alpha = model->addOperand(&type2);
+  auto beta = model->addOperand(&type2);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // radius 2 (narrower window than the other LRN tests)
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  static float bias_init[] = {9.0f};  // bias 9.0
+  model->setOperandValue(bias, bias_init, sizeof(float) * 1);
+  static float alpha_init[] = {4.0f};  // alpha 4.0
+  model->setOperandValue(alpha, alpha_init, sizeof(float) * 1);
+  static float beta_init[] = {0.5f};  // beta 0.5
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION, {input, radius, bias, alpha, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/relu6_quant8.model.cpp b/nn/runtime/test/generated/models/relu6_quant8.model.cpp
new file mode 100644
index 0000000..c8d3e15
--- /dev/null
+++ b/nn/runtime/test/generated/models/relu6_quant8.model.cpp
@@ -0,0 +1,19 @@
+// Generated file (from: relu6_quant8.mod.py). Do not edit
+void CreateModel(Model *model) {  // RELU6, QUANT8_ASYMM: elementwise clamp on a {1, 2, 2, 1} tensor
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 2, 2, 1});  // same type for input and output
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_RELU6, {op1}, {op2});  // no constant operands: single-input activation
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/sigmoid_float.model.cpp b/nn/runtime/test/generated/models/sigmoid_float.model.cpp
new file mode 100644
index 0000000..a27fce6
--- /dev/null
+++ b/nn/runtime/test/generated/models/sigmoid_float.model.cpp
@@ -0,0 +1,19 @@
+// Generated file (from: sigmoid_float.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOGISTIC (sigmoid), FLOAT32 {1, 2, 2, 1}
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});  // same type for input and output
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_LOGISTIC, {op1}, {op3});  // no constant operands: single-input activation
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/sigmoid_quant8.model.cpp b/nn/runtime/test/generated/models/sigmoid_quant8.model.cpp
new file mode 100644
index 0000000..4b864e6
--- /dev/null
+++ b/nn/runtime/test/generated/models/sigmoid_quant8.model.cpp
@@ -0,0 +1,19 @@
+// Generated file (from: sigmoid_quant8.mod.py). Do not edit
+void CreateModel(Model *model) {  // LOGISTIC (sigmoid), QUANT8_ASYMM {1, 2, 2, 1}
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.0f, {1, 2, 2, 1});  // same type for input and output — NOTE(review): NNAPI fixes LOGISTIC output scale at 1/256, zeroPoint 0; confirm the harness derives that
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_LOGISTIC, {op1}, {op3});  // no constant operands: single-input activation
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/softmax_float_1.model.cpp b/nn/runtime/test/generated/models/softmax_float_1.model.cpp
new file mode 100644
index 0000000..435b1a4
--- /dev/null
+++ b/nn/runtime/test/generated/models/softmax_float_1.model.cpp
@@ -0,0 +1,23 @@
+// Generated file (from: softmax_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // SOFTMAX, FLOAT32 {1, 4}
+  OperandType type1(Type::FLOAT32, {});  // scalar float: beta
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 4});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto beta = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static float beta_init[] = {0.0f};  // NOTE(review): beta == 0 makes softmax output uniform regardless of input — confirm this matches softmax_float_1.mod.py (possible truncation of a tiny beta)
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/softmax_float_2.model.cpp b/nn/runtime/test/generated/models/softmax_float_2.model.cpp
new file mode 100644
index 0000000..52c6ab4
--- /dev/null
+++ b/nn/runtime/test/generated/models/softmax_float_2.model.cpp
@@ -0,0 +1,23 @@
+// Generated file (from: softmax_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // SOFTMAX, FLOAT32 {2, 5} (two batches)
+  OperandType type1(Type::FLOAT32, {});  // scalar float: beta
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 5});  // input and output share this shape
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto beta = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static float beta_init[] = {1.0f};  // beta 1.0 (standard softmax)
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/softmax_quant8_1.model.cpp b/nn/runtime/test/generated/models/softmax_quant8_1.model.cpp
new file mode 100644
index 0000000..ba66adc
--- /dev/null
+++ b/nn/runtime/test/generated/models/softmax_quant8_1.model.cpp
@@ -0,0 +1,23 @@
+// Generated file (from: softmax_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // SOFTMAX, QUANT8_ASYMM {1, 4}
+  OperandType type1(Type::FLOAT32, {});  // scalar float: beta
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 4});  // input and output share this type — NOTE(review): NNAPI fixes SOFTMAX output scale at 1/256, zeroPoint 0; confirm the harness derives that
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto beta = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static float beta_init[] = {0.0f};  // NOTE(review): beta == 0 makes softmax output uniform — confirm this matches softmax_quant8_1.mod.py
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/softmax_quant8_2.model.cpp b/nn/runtime/test/generated/models/softmax_quant8_2.model.cpp
new file mode 100644
index 0000000..74f66f0
--- /dev/null
+++ b/nn/runtime/test/generated/models/softmax_quant8_2.model.cpp
@@ -0,0 +1,23 @@
+// Generated file (from: softmax_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // SOFTMAX, QUANT8_ASYMM {2, 5} (two batches)
+  OperandType type1(Type::FLOAT32, {});  // scalar float: beta
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {2, 5});  // input and output share this type
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto beta = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static float beta_init[] = {1.0f};  // beta 1.0 (standard softmax)
+  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_float_1.model.cpp b/nn/runtime/test/generated/models/space_to_depth_float_1.model.cpp
new file mode 100644
index 0000000..203f439
--- /dev/null
+++ b/nn/runtime/test/generated/models/space_to_depth_float_1.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: space_to_depth_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // SPACE_TO_DEPTH, FLOAT32: {1, 2, 2, 2} -> {1, 1, 1, 8}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 1, 1, 8});  // output
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});  // input
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_float_2.model.cpp b/nn/runtime/test/generated/models/space_to_depth_float_2.model.cpp
new file mode 100644
index 0000000..0b2154d
--- /dev/null
+++ b/nn/runtime/test/generated/models/space_to_depth_float_2.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: space_to_depth_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // SPACE_TO_DEPTH, FLOAT32: {1, 4, 4, 1} -> {1, 2, 2, 4}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});  // output
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 4, 1});  // input
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
new file mode 100644
index 0000000..c9b1a4b
--- /dev/null
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: space_to_depth_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {  // SPACE_TO_DEPTH, QUANT8_ASYMM: {1, 2, 2, 2} -> {1, 1, 1, 8}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 2, 2, 2});  // input
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 1, 1, 8});  // output: quant params added to match input — SPACE_TO_DEPTH only rearranges data, so output scale/zeroPoint must equal the input's
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
new file mode 100644
index 0000000..060c611
--- /dev/null
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {  // SPACE_TO_DEPTH, QUANT8_ASYMM: {1, 4, 4, 1} -> {1, 2, 2, 4}
+  OperandType type1(Type::INT32, {});  // scalar INT32: block-size operand
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});  // output — NOTE(review): no quant range given, unlike space_to_depth_quant8_1; confirm the spec intends default scale/zeroPoint
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});  // input — NOTE(review): same missing quant range; must match output's quantization for this op
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto radius = model->addOperand(&type1);  // generator names the block-size operand "radius"
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t radius_init[] = {2};  // block size 2
+  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {input, radius}, {output});
+  // Phase 3, inputs and outputs
+  model->setInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {  // presumably: whether output index i is excluded from result comparison
+  static std::set<int> ignore = {};  // no outputs ignored in this test
+  return ignore.find(i) != ignore.end();
+}