Force CpuExecutor to validate user-provided model output operands.

  - For operands with OperandLifeTime::MODEL_OUTPUT, the dimensions,
    type, and other meta-data must match the output Shape calculated
    from the operation preparation step.
  - Fix the ill-defined tests caught by the added validation.
  - Incidental changes: generated more tests from test specs.

Bug: 67390841
Test: NeuralNetworksTests pass
Change-Id: I40d35db0f7a868feae773dbf7e12cf4bf5f5e275
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index b8988b5..480d6cb 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -82,6 +82,20 @@
 // Updates the RunTimeOperandInfo with the newly calculated shape.
 // Allocate the buffer if we need to.
 static bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape) {
+    // For user-provided model output operands, the parameters must match the Shape
+    // calculated from the preparation step.
+    if (info->lifetime == OperandLifeTime::MODEL_OUTPUT) {
+        if (info->type != shape.type ||
+            info->dimensions != shape.dimensions) {
+            LOG(ERROR) << "Invalid type or dimensions for model output";
+            return false;
+        }
+        if (info->type == OperandType::TENSOR_QUANT8_ASYMM &&
+            (info->scale != shape.scale || info->zeroPoint != shape.offset)) {
+            LOG(ERROR) << "Invalid scale or zeroPoint for model output";
+            return false;
+        }
+    }
     info->type = shape.type;
     info->dimensions = shape.dimensions;
     info->scale = shape.scale;
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index 537581a..d5beafc 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -505,7 +505,7 @@
     outputShape->type = valueShape.type;
     outputShape->dimensions = { lookups, columns };
     for (uint32_t i = 2; i < getNumberOfDimensions(valueShape); i++) {
-        outputShape->dimensions[i] = getSizeOfDimension(valueShape, i);
+        outputShape->dimensions.push_back(getSizeOfDimension(valueShape, i));
     }
     outputShape->offset = valueShape.offset;
     outputShape->scale = valueShape.scale;
@@ -528,7 +528,7 @@
     outputShape->type = valueShape.type;
     outputShape->dimensions = { lookups };
     for (uint32_t i = 1; i < getNumberOfDimensions(valueShape); i++) {
-        outputShape->dimensions[i] = getSizeOfDimension(valueShape, i);
+        outputShape->dimensions.push_back(getSizeOfDimension(valueShape, i));
     }
     outputShape->offset = valueShape.offset;
     outputShape->scale = valueShape.scale;
diff --git a/nn/runtime/test/generated/all_generated_tests.cpp b/nn/runtime/test/generated/all_generated_tests.cpp
index 2381bc6..269edf6 100644
--- a/nn/runtime/test/generated/all_generated_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_tests.cpp
@@ -253,6 +253,20 @@
             conv_float_channels::examples);
 }
 
+namespace conv_float_channels_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_channels_weights_as_inputs test
+#include "generated/examples/conv_float_channels_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_float_channels_weights_as_inputs.model.cpp"
+} // namespace conv_float_channels_weights_as_inputs
+TEST_F(GeneratedTests, conv_float_channels_weights_as_inputs) {
+    Execute(conv_float_channels_weights_as_inputs::CreateModel,
+            conv_float_channels_weights_as_inputs::is_ignored,
+            conv_float_channels_weights_as_inputs::examples);
+}
+
 namespace conv_float_large {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_float_large test
@@ -267,6 +281,20 @@
             conv_float_large::examples);
 }
 
+namespace conv_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_large_weights_as_inputs test
+#include "generated/examples/conv_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_float_large_weights_as_inputs.model.cpp"
+} // namespace conv_float_large_weights_as_inputs
+TEST_F(GeneratedTests, conv_float_large_weights_as_inputs) {
+    Execute(conv_float_large_weights_as_inputs::CreateModel,
+            conv_float_large_weights_as_inputs::is_ignored,
+            conv_float_large_weights_as_inputs::examples);
+}
+
 namespace conv_float {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_float test
@@ -281,6 +309,20 @@
             conv_float::examples);
 }
 
+namespace conv_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_weights_as_inputs test
+#include "generated/examples/conv_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_float_weights_as_inputs.model.cpp"
+} // namespace conv_float_weights_as_inputs
+TEST_F(GeneratedTests, conv_float_weights_as_inputs) {
+    Execute(conv_float_weights_as_inputs::CreateModel,
+            conv_float_weights_as_inputs::is_ignored,
+            conv_float_weights_as_inputs::examples);
+}
+
 namespace conv_quant8_channels {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8_channels test
@@ -295,6 +337,20 @@
             conv_quant8_channels::examples);
 }
 
+namespace conv_quant8_channels_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_channels_weights_as_inputs test
+#include "generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_quant8_channels_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_channels_weights_as_inputs
+TEST_F(GeneratedTests, conv_quant8_channels_weights_as_inputs) {
+    Execute(conv_quant8_channels_weights_as_inputs::CreateModel,
+            conv_quant8_channels_weights_as_inputs::is_ignored,
+            conv_quant8_channels_weights_as_inputs::examples);
+}
+
 namespace conv_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8_large test
@@ -309,6 +365,20 @@
             conv_quant8_large::examples);
 }
 
+namespace conv_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_large_weights_as_inputs test
+#include "generated/examples/conv_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_quant8_large_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_large_weights_as_inputs
+TEST_F(GeneratedTests, conv_quant8_large_weights_as_inputs) {
+    Execute(conv_quant8_large_weights_as_inputs::CreateModel,
+            conv_quant8_large_weights_as_inputs::is_ignored,
+            conv_quant8_large_weights_as_inputs::examples);
+}
+
 namespace conv_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8 test
@@ -337,6 +407,34 @@
             conv_quant8_overflow::examples);
 }
 
+namespace conv_quant8_overflow_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_overflow_weights_as_inputs test
+#include "generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_overflow_weights_as_inputs
+TEST_F(GeneratedTests, conv_quant8_overflow_weights_as_inputs) {
+    Execute(conv_quant8_overflow_weights_as_inputs::CreateModel,
+            conv_quant8_overflow_weights_as_inputs::is_ignored,
+            conv_quant8_overflow_weights_as_inputs::examples);
+}
+
+namespace conv_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_weights_as_inputs test
+#include "generated/examples/conv_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/conv_quant8_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_weights_as_inputs
+TEST_F(GeneratedTests, conv_quant8_weights_as_inputs) {
+    Execute(conv_quant8_weights_as_inputs::CreateModel,
+            conv_quant8_weights_as_inputs::is_ignored,
+            conv_quant8_weights_as_inputs::examples);
+}
+
 namespace depth_to_space_float_1 {
 std::vector<MixedTypedExample> examples = {
 // Generated depth_to_space_float_1 test
@@ -407,6 +505,20 @@
             depthwise_conv2d_float_large_2::examples);
 }
 
+namespace depthwise_conv2d_float_large_2_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_large_2_weights_as_inputs test
+#include "generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_large_2_weights_as_inputs
+TEST_F(GeneratedTests, depthwise_conv2d_float_large_2_weights_as_inputs) {
+    Execute(depthwise_conv2d_float_large_2_weights_as_inputs::CreateModel,
+            depthwise_conv2d_float_large_2_weights_as_inputs::is_ignored,
+            depthwise_conv2d_float_large_2_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_float_large {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_float_large test
@@ -421,6 +533,20 @@
             depthwise_conv2d_float_large::examples);
 }
 
+namespace depthwise_conv2d_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_large_weights_as_inputs test
+#include "generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_large_weights_as_inputs
+TEST_F(GeneratedTests, depthwise_conv2d_float_large_weights_as_inputs) {
+    Execute(depthwise_conv2d_float_large_weights_as_inputs::CreateModel,
+            depthwise_conv2d_float_large_weights_as_inputs::is_ignored,
+            depthwise_conv2d_float_large_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_float {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_float test
@@ -435,6 +561,20 @@
             depthwise_conv2d_float::examples);
 }
 
+namespace depthwise_conv2d_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_weights_as_inputs test
+#include "generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_weights_as_inputs
+TEST_F(GeneratedTests, depthwise_conv2d_float_weights_as_inputs) {
+    Execute(depthwise_conv2d_float_weights_as_inputs::CreateModel,
+            depthwise_conv2d_float_weights_as_inputs::is_ignored,
+            depthwise_conv2d_float_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_quant8_large test
@@ -449,6 +589,20 @@
             depthwise_conv2d_quant8_large::examples);
 }
 
+namespace depthwise_conv2d_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_quant8_large_weights_as_inputs test
+#include "generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_quant8_large_weights_as_inputs
+TEST_F(GeneratedTests, depthwise_conv2d_quant8_large_weights_as_inputs) {
+    Execute(depthwise_conv2d_quant8_large_weights_as_inputs::CreateModel,
+            depthwise_conv2d_quant8_large_weights_as_inputs::is_ignored,
+            depthwise_conv2d_quant8_large_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_quant8 test
@@ -463,6 +617,20 @@
             depthwise_conv2d_quant8::examples);
 }
 
+namespace depthwise_conv2d_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_quant8_weights_as_inputs test
+#include "generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_quant8_weights_as_inputs
+TEST_F(GeneratedTests, depthwise_conv2d_quant8_weights_as_inputs) {
+    Execute(depthwise_conv2d_quant8_weights_as_inputs::CreateModel,
+            depthwise_conv2d_quant8_weights_as_inputs::is_ignored,
+            depthwise_conv2d_quant8_weights_as_inputs::examples);
+}
+
 namespace dequantize {
 std::vector<MixedTypedExample> examples = {
 // Generated dequantize test
@@ -519,6 +687,20 @@
             fully_connected_float_large::examples);
 }
 
+namespace fully_connected_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_large_weights_as_inputs test
+#include "generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_large_weights_as_inputs.model.cpp"
+} // namespace fully_connected_float_large_weights_as_inputs
+TEST_F(GeneratedTests, fully_connected_float_large_weights_as_inputs) {
+    Execute(fully_connected_float_large_weights_as_inputs::CreateModel,
+            fully_connected_float_large_weights_as_inputs::is_ignored,
+            fully_connected_float_large_weights_as_inputs::examples);
+}
+
 namespace fully_connected_float {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_float test
@@ -533,6 +715,20 @@
             fully_connected_float::examples);
 }
 
+namespace fully_connected_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_weights_as_inputs test
+#include "generated/examples/fully_connected_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_weights_as_inputs.model.cpp"
+} // namespace fully_connected_float_weights_as_inputs
+TEST_F(GeneratedTests, fully_connected_float_weights_as_inputs) {
+    Execute(fully_connected_float_weights_as_inputs::CreateModel,
+            fully_connected_float_weights_as_inputs::is_ignored,
+            fully_connected_float_weights_as_inputs::examples);
+}
+
 namespace fully_connected_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_quant8_large test
@@ -547,6 +743,20 @@
             fully_connected_quant8_large::examples);
 }
 
+namespace fully_connected_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_quant8_large_weights_as_inputs test
+#include "generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp"
+} // namespace fully_connected_quant8_large_weights_as_inputs
+TEST_F(GeneratedTests, fully_connected_quant8_large_weights_as_inputs) {
+    Execute(fully_connected_quant8_large_weights_as_inputs::CreateModel,
+            fully_connected_quant8_large_weights_as_inputs::is_ignored,
+            fully_connected_quant8_large_weights_as_inputs::examples);
+}
+
 namespace fully_connected_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_quant8 test
@@ -561,6 +771,20 @@
             fully_connected_quant8::examples);
 }
 
+namespace fully_connected_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_quant8_weights_as_inputs test
+#include "generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_quant8_weights_as_inputs.model.cpp"
+} // namespace fully_connected_quant8_weights_as_inputs
+TEST_F(GeneratedTests, fully_connected_quant8_weights_as_inputs) {
+    Execute(fully_connected_quant8_weights_as_inputs::CreateModel,
+            fully_connected_quant8_weights_as_inputs::is_ignored,
+            fully_connected_quant8_weights_as_inputs::examples);
+}
+
 namespace hashtable_lookup_float {
 std::vector<MixedTypedExample> examples = {
 // Generated hashtable_lookup_float test
@@ -757,6 +981,20 @@
             logistic_quant8_2::examples);
 }
 
+namespace lsh_projection_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated lsh_projection_2 test
+#include "generated/examples/lsh_projection_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/lsh_projection_2.model.cpp"
+} // namespace lsh_projection_2
+TEST_F(GeneratedTests, lsh_projection_2) {
+    Execute(lsh_projection_2::CreateModel,
+            lsh_projection_2::is_ignored,
+            lsh_projection_2::examples);
+}
+
 namespace lsh_projection {
 std::vector<MixedTypedExample> examples = {
 // Generated lsh_projection test
@@ -771,6 +1009,20 @@
             lsh_projection::examples);
 }
 
+namespace lsh_projection_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated lsh_projection_weights_as_inputs test
+#include "generated/examples/lsh_projection_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/lsh_projection_weights_as_inputs.model.cpp"
+} // namespace lsh_projection_weights_as_inputs
+TEST_F(GeneratedTests, lsh_projection_weights_as_inputs) {
+    Execute(lsh_projection_weights_as_inputs::CreateModel,
+            lsh_projection_weights_as_inputs::is_ignored,
+            lsh_projection_weights_as_inputs::examples);
+}
+
 namespace lstm2 {
 std::vector<MixedTypedExample> examples = {
 // Generated lstm2 test
@@ -1163,6 +1415,34 @@
             reshape_quant8::examples);
 }
 
+namespace reshape_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated reshape_quant8_weights_as_inputs test
+#include "generated/examples/reshape_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/reshape_quant8_weights_as_inputs.model.cpp"
+} // namespace reshape_quant8_weights_as_inputs
+TEST_F(GeneratedTests, reshape_quant8_weights_as_inputs) {
+    Execute(reshape_quant8_weights_as_inputs::CreateModel,
+            reshape_quant8_weights_as_inputs::is_ignored,
+            reshape_quant8_weights_as_inputs::examples);
+}
+
+namespace reshape_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated reshape_weights_as_inputs test
+#include "generated/examples/reshape_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/reshape_weights_as_inputs.model.cpp"
+} // namespace reshape_weights_as_inputs
+TEST_F(GeneratedTests, reshape_weights_as_inputs) {
+    Execute(reshape_weights_as_inputs::CreateModel,
+            reshape_weights_as_inputs::is_ignored,
+            reshape_weights_as_inputs::examples);
+}
+
 namespace resize_bilinear {
 std::vector<MixedTypedExample> examples = {
 // Generated resize_bilinear test
diff --git a/nn/runtime/test/generated/all_generated_vts_tests.cpp b/nn/runtime/test/generated/all_generated_vts_tests.cpp
index fe67060..01013f3 100644
--- a/nn/runtime/test/generated/all_generated_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_vts_tests.cpp
@@ -271,6 +271,21 @@
                              conv_float_channels::examples);
 }
 
+namespace conv_float_channels_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_channels_weights_as_inputs test
+#include "examples/conv_float_channels_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_float_channels_weights_as_inputs.model.cpp"
+} // namespace conv_float_channels_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_float_channels_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_float_channels_weights_as_inputs::createTestModel,
+                             conv_float_channels_weights_as_inputs::is_ignored,
+                             conv_float_channels_weights_as_inputs::examples);
+}
+
 namespace conv_float_large {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_float_large test
@@ -286,6 +301,21 @@
                              conv_float_large::examples);
 }
 
+namespace conv_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_large_weights_as_inputs test
+#include "examples/conv_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_float_large_weights_as_inputs.model.cpp"
+} // namespace conv_float_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_float_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_float_large_weights_as_inputs::createTestModel,
+                             conv_float_large_weights_as_inputs::is_ignored,
+                             conv_float_large_weights_as_inputs::examples);
+}
+
 namespace conv_float {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_float test
@@ -301,6 +331,21 @@
                              conv_float::examples);
 }
 
+namespace conv_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_float_weights_as_inputs test
+#include "examples/conv_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_float_weights_as_inputs.model.cpp"
+} // namespace conv_float_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_float_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_float_weights_as_inputs::createTestModel,
+                             conv_float_weights_as_inputs::is_ignored,
+                             conv_float_weights_as_inputs::examples);
+}
+
 namespace conv_quant8_channels {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8_channels test
@@ -316,6 +361,21 @@
                              conv_quant8_channels::examples);
 }
 
+namespace conv_quant8_channels_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_channels_weights_as_inputs test
+#include "examples/conv_quant8_channels_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_quant8_channels_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_channels_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_quant8_channels_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_quant8_channels_weights_as_inputs::createTestModel,
+                             conv_quant8_channels_weights_as_inputs::is_ignored,
+                             conv_quant8_channels_weights_as_inputs::examples);
+}
+
 namespace conv_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8_large test
@@ -331,6 +391,21 @@
                              conv_quant8_large::examples);
 }
 
+namespace conv_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_large_weights_as_inputs test
+#include "examples/conv_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_quant8_large_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_quant8_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_quant8_large_weights_as_inputs::createTestModel,
+                             conv_quant8_large_weights_as_inputs::is_ignored,
+                             conv_quant8_large_weights_as_inputs::examples);
+}
+
 namespace conv_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated conv_quant8 test
@@ -361,6 +436,36 @@
                              conv_quant8_overflow::examples);
 }
 
+namespace conv_quant8_overflow_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_overflow_weights_as_inputs test
+#include "examples/conv_quant8_overflow_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_quant8_overflow_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_overflow_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_quant8_overflow_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_quant8_overflow_weights_as_inputs::createTestModel,
+                             conv_quant8_overflow_weights_as_inputs::is_ignored,
+                             conv_quant8_overflow_weights_as_inputs::examples);
+}
+
+namespace conv_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated conv_quant8_weights_as_inputs test
+#include "examples/conv_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/conv_quant8_weights_as_inputs.model.cpp"
+} // namespace conv_quant8_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, conv_quant8_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             conv_quant8_weights_as_inputs::createTestModel,
+                             conv_quant8_weights_as_inputs::is_ignored,
+                             conv_quant8_weights_as_inputs::examples);
+}
+
 namespace depth_to_space_float_1 {
 std::vector<MixedTypedExample> examples = {
 // Generated depth_to_space_float_1 test
@@ -436,6 +541,21 @@
                              depthwise_conv2d_float_large_2::examples);
 }
 
+namespace depthwise_conv2d_float_large_2_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_large_2_weights_as_inputs test
+#include "examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_large_2_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_2_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             depthwise_conv2d_float_large_2_weights_as_inputs::createTestModel,
+                             depthwise_conv2d_float_large_2_weights_as_inputs::is_ignored,
+                             depthwise_conv2d_float_large_2_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_float_large {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_float_large test
@@ -451,6 +571,21 @@
                              depthwise_conv2d_float_large::examples);
 }
 
+namespace depthwise_conv2d_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_large_weights_as_inputs test
+#include "examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             depthwise_conv2d_float_large_weights_as_inputs::createTestModel,
+                             depthwise_conv2d_float_large_weights_as_inputs::is_ignored,
+                             depthwise_conv2d_float_large_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_float {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_float test
@@ -466,6 +601,21 @@
                              depthwise_conv2d_float::examples);
 }
 
+namespace depthwise_conv2d_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_float_weights_as_inputs test
+#include "examples/depthwise_conv2d_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/depthwise_conv2d_float_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_float_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             depthwise_conv2d_float_weights_as_inputs::createTestModel,
+                             depthwise_conv2d_float_weights_as_inputs::is_ignored,
+                             depthwise_conv2d_float_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_quant8_large test
@@ -481,6 +631,21 @@
                              depthwise_conv2d_quant8_large::examples);
 }
 
+namespace depthwise_conv2d_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_quant8_large_weights_as_inputs test
+#include "examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_quant8_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             depthwise_conv2d_quant8_large_weights_as_inputs::createTestModel,
+                             depthwise_conv2d_quant8_large_weights_as_inputs::is_ignored,
+                             depthwise_conv2d_quant8_large_weights_as_inputs::examples);
+}
+
 namespace depthwise_conv2d_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated depthwise_conv2d_quant8 test
@@ -496,6 +661,21 @@
                              depthwise_conv2d_quant8::examples);
 }
 
+namespace depthwise_conv2d_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated depthwise_conv2d_quant8_weights_as_inputs test
+#include "examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp"
+} // namespace depthwise_conv2d_quant8_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             depthwise_conv2d_quant8_weights_as_inputs::createTestModel,
+                             depthwise_conv2d_quant8_weights_as_inputs::is_ignored,
+                             depthwise_conv2d_quant8_weights_as_inputs::examples);
+}
+
 namespace dequantize {
 std::vector<MixedTypedExample> examples = {
 // Generated dequantize test
@@ -556,6 +736,21 @@
                              fully_connected_float_large::examples);
 }
 
+namespace fully_connected_float_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_large_weights_as_inputs test
+#include "examples/fully_connected_float_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/fully_connected_float_large_weights_as_inputs.model.cpp"
+} // namespace fully_connected_float_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, fully_connected_float_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             fully_connected_float_large_weights_as_inputs::createTestModel,
+                             fully_connected_float_large_weights_as_inputs::is_ignored,
+                             fully_connected_float_large_weights_as_inputs::examples);
+}
+
 namespace fully_connected_float {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_float test
@@ -571,6 +766,21 @@
                              fully_connected_float::examples);
 }
 
+namespace fully_connected_float_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_weights_as_inputs test
+#include "examples/fully_connected_float_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/fully_connected_float_weights_as_inputs.model.cpp"
+} // namespace fully_connected_float_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, fully_connected_float_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             fully_connected_float_weights_as_inputs::createTestModel,
+                             fully_connected_float_weights_as_inputs::is_ignored,
+                             fully_connected_float_weights_as_inputs::examples);
+}
+
 namespace fully_connected_quant8_large {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_quant8_large test
@@ -586,6 +796,21 @@
                              fully_connected_quant8_large::examples);
 }
 
+namespace fully_connected_quant8_large_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_quant8_large_weights_as_inputs test
+#include "examples/fully_connected_quant8_large_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/fully_connected_quant8_large_weights_as_inputs.model.cpp"
+} // namespace fully_connected_quant8_large_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_large_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             fully_connected_quant8_large_weights_as_inputs::createTestModel,
+                             fully_connected_quant8_large_weights_as_inputs::is_ignored,
+                             fully_connected_quant8_large_weights_as_inputs::examples);
+}
+
 namespace fully_connected_quant8 {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_quant8 test
@@ -601,6 +826,21 @@
                              fully_connected_quant8::examples);
 }
 
+namespace fully_connected_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_quant8_weights_as_inputs test
+#include "examples/fully_connected_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/fully_connected_quant8_weights_as_inputs.model.cpp"
+} // namespace fully_connected_quant8_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             fully_connected_quant8_weights_as_inputs::createTestModel,
+                             fully_connected_quant8_weights_as_inputs::is_ignored,
+                             fully_connected_quant8_weights_as_inputs::examples);
+}
+
 namespace hashtable_lookup_float {
 std::vector<MixedTypedExample> examples = {
 // Generated hashtable_lookup_float test
@@ -811,6 +1051,21 @@
                              logistic_quant8_2::examples);
 }
 
+namespace lsh_projection_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated lsh_projection_2 test
+#include "examples/lsh_projection_2.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/lsh_projection_2.model.cpp"
+} // namespace lsh_projection_2
+TEST_F(NeuralnetworksHidlTest, lsh_projection_2) {
+    generated_tests::Execute(device,
+                             lsh_projection_2::createTestModel,
+                             lsh_projection_2::is_ignored,
+                             lsh_projection_2::examples);
+}
+
 namespace lsh_projection {
 std::vector<MixedTypedExample> examples = {
 // Generated lsh_projection test
@@ -826,6 +1081,21 @@
                              lsh_projection::examples);
 }
 
+namespace lsh_projection_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated lsh_projection_weights_as_inputs test
+#include "examples/lsh_projection_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/lsh_projection_weights_as_inputs.model.cpp"
+} // namespace lsh_projection_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, lsh_projection_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             lsh_projection_weights_as_inputs::createTestModel,
+                             lsh_projection_weights_as_inputs::is_ignored,
+                             lsh_projection_weights_as_inputs::examples);
+}
+
 namespace lstm2 {
 std::vector<MixedTypedExample> examples = {
 // Generated lstm2 test
@@ -1231,6 +1501,36 @@
                              reshape_quant8::examples);
 }
 
+namespace reshape_quant8_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated reshape_quant8_weights_as_inputs test
+#include "examples/reshape_quant8_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/reshape_quant8_weights_as_inputs.model.cpp"
+} // namespace reshape_quant8_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, reshape_quant8_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             reshape_quant8_weights_as_inputs::createTestModel,
+                             reshape_quant8_weights_as_inputs::is_ignored,
+                             reshape_quant8_weights_as_inputs::examples);
+}
+
+namespace reshape_weights_as_inputs {
+std::vector<MixedTypedExample> examples = {
+// Generated reshape_weights_as_inputs test
+#include "examples/reshape_weights_as_inputs.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/reshape_weights_as_inputs.model.cpp"
+} // namespace reshape_weights_as_inputs
+TEST_F(NeuralnetworksHidlTest, reshape_weights_as_inputs) {
+    generated_tests::Execute(device,
+                             reshape_weights_as_inputs::createTestModel,
+                             reshape_weights_as_inputs::is_ignored,
+                             reshape_weights_as_inputs::examples);
+}
+
 namespace resize_bilinear {
 std::vector<MixedTypedExample> examples = {
 // Generated resize_bilinear test
diff --git a/nn/runtime/test/generated/examples/conv_float.example.cpp b/nn/runtime/test/generated/examples/conv_float.example.cpp
index 0f37863..f316049 100644
--- a/nn/runtime/test/generated/examples/conv_float.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}, {1, {0.25f, 0.25f, 0.25f, 0.25f}}, {2, {0}}},
+  {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/conv_float_channels.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
index c4f6ad8..f7c110f 100644
--- a/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {99.0f, 99.0f, 99.0f}}, {1, {1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f}}, {2, {0.0f, 0.0f, 0.0f}}},
+  {{0, {99.0f, 99.0f, 99.0f}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..44b4453
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_float_channels_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {99.0f, 99.0f, 99.0f}}, {1, {1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f}}, {2, {0.0f, 0.0f, 0.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {297.0f, 594.0f, 891.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_float_large.example.cpp b/nn/runtime/test/generated/examples/conv_float_large.example.cpp
index 2f4002f..9650e54 100644
--- a/nn/runtime/test/generated/examples/conv_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_large.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f}}, {1, {1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f, 3.0f, 6.0f, 9.0f}}, {2, {0.0f, 0.0f, 0.0f}}},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..94441a3
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_float_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f}}, {1, {1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f, 3.0f, 6.0f, 9.0f}}, {2, {0.0f, 0.0f, 0.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {30.0f, 36.0f, 42.0f, 66.0f, 81.0f, 96.0f, 102.0f, 126.0f, 150.0f, 138.0f, 171.0f, 204.0f, 174.0f, 216.0f, 258.0f, 210.0f, 261.0f, 312.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..55ec464
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_float_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}, {1, {0.25f, 0.25f, 0.25f, 0.25f}}, {2, {0}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.875f, 0.875f, 0.875f, 0.875f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_quant8.example.cpp b/nn/runtime/test/generated/examples/conv_quant8.example.cpp
index be6750c..34ab5c1 100644
--- a/nn/runtime/test/generated/examples/conv_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {4}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {8, 8, 8, 8, 4, 8, 8, 8, 8}}, {1, {2, 2, 2, 2}}}
+  {{0, {8, 8, 8, 8, 4, 8, 8, 8, 8}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
index f19715e..d657972 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {0, 0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {10, 10, 10}}, {1, {1, 2, 3, 4, 5, 6, 7, 8, 9}}}
+  {{0, {10, 10, 10}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..bea2210
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_quant8_channels_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {0, 0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {10, 10, 10}}, {1, {1, 2, 3, 4, 5, 6, 7, 8, 9}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {15, 38, 60}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
index 3d23e34..cd778a5 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {0, 0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {1, {1, 4, 7, 2, 5, 8, 3, 6, 9}}}
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..12c0ef4
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_quant8_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {0, 0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {1, {1, 4, 7, 2, 5, 8, 3, 6, 9}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {8, 9, 11, 17, 21, 24, 26, 32, 38, 35, 43, 51, 44, 54, 65, 53, 66, 78}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
index d0fadf4..a9aa2b2 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {0, 0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {1, {10, 40, 70, 20, 50, 80, 30, 60, 90}}}
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..78c7308
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_quant8_overflow_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {0, 0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {1, {10, 40, 70, 20, 50, 80, 30, 60, 90}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {75, 90, 105, 165, 203, 240, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..b7de7fa
--- /dev/null
+++ b/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: conv_quant8_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {4}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {8, 8, 8, 8, 4, 8, 8, 8, 8}}, {1, {2, 2, 2, 2}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {15, 15, 15, 15}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
index 1c20efd..be9dc51 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {10, 21, 10, 22, 10, 23, 10, 24}}, {1, {0.25f, 0, 0.25f, 1, 0.25f, 0, 0.25f, 1}}, {2, {0, 4}}},
+  {{0, {10, 21, 10, 22, 10, 23, 10, 24, 10, 25, 10, 26, 10, 27, 10, 28, 10, 29}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
@@ -13,7 +13,7 @@
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {10, 50}}},
+  {{0, {11, 3, 7.2f, 10.6f, 11, 3, 7.4f, 10.9f, 11, 3, 7.8f, 11.5f, 11, 3, 8.0f, 11.8f}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
index 14dff4d..0746826 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {10, 21, 100, 10, 22, 200, 10, 23, 300, 10, 24, 400}}, {1, {0.25f, 0, 0.25f, 1, 0.25f, 0, 0.25f, 1}}, {2, {100, 200}}},
+  {{0, {10, 21, 10, 22, 10, 23, 10, 24}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
index 8add6ed..5716a17 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {10, 21, 100, 10, 22, 200, 10, 23, 300, 10, 24, 400}}, {1, {0.25f, 0, 10, 100, 0.25f, 1, 20, 100, 0.25f, 0, 30, 100, 0.25f, 1, 40, 100}}, {2, {600000, 700000, 800000, 900000}}},
+  {{0, {10, 21, 100, 0, 10, 22, 200, 0, 10, 23, 300, 0, 10, 24, 400, 0}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..9b097ac
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {10, 21, 100, 10, 22, 200, 10, 23, 300, 10, 24, 400}}, {1, {0.25f, 0, 10, 100, 0.25f, 1, 20, 100, 0.25f, 0, 30, 100, 0.25f, 1, 40, 100}}, {2, {600000, 700000, 800000, 900000}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {600010, 700046, 830000, 900000}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..10873a7
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depthwise_conv2d_float_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {10, 21, 100, 10, 22, 200, 10, 23, 300, 10, 24, 400}}, {1, {0.25f, 0, 0.25f, 1, 0.25f, 0, 0.25f, 1}}, {2, {100, 200}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {110, 246}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..6731073
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depthwise_conv2d_float_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {10, 21, 10, 22, 10, 23, 10, 24, 10, 25, 10, 26, 10, 27, 10, 28, 10, 29}}, {1, {0.25f, 0, 0.2f, 0, 0.25f, 0, 0, 0.3f, 0.25f, 0, 0, 0, 0.25f, 0.1f, 0, 0}}, {2, {1, 2, 3, 4}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {11, 3, 7.2f, 10.6f, 11, 3, 7.4f, 10.9f, 11, 3, 7.8f, 11.5f, 11, 3, 8.0f, 11.8f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
index 658a8b5..1079fda 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
+  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
index 328c994..d8bab86 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
+  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..4b3905c
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depthwise_conv2d_quant8_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {8, 48}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..b8094ca
--- /dev/null
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: depthwise_conv2d_quant8_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {8, 48}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_float.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
index c1167c2..4af8806 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {2, 32, 16}}, {1, {2}}, {2, {4}}},
+  {{0, {2, 32, 16}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
index 1cd4afd..3ad2caa 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {1, 10, 100, 1000, 10000}}, {1, {2, 3, 4, 5, 6}}, {2, {900000}}},
+  {{0, {1, 10, 100, 1000, 10000}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..fdc7db1
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 10, 100, 1000, 10000}}, {1, {2, 3, 4, 5, 6}}, {2, {900000}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {965432}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..83c5527
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2, 32, 16}}, {1, {2}}, {2, {4}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8, 68, 36}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
index 86d493c..18d2098 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {4}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {2, 32, 16}}, {1, {2}}}
+  {{0, {2, 32, 16}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
index 9df3955..c9a5aa1 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
@@ -6,9 +6,9 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{2, {10}}},
+  {},
   // int -> QUANT8_ASYMM map
-  {{0, {10, 10, 10, 10, 10}}, {1, {10, 20, 20, 20, 10}}}
+  {{0, {10, 10, 10, 10, 10}}}
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..14bd00d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_quant8_large_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {10}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {10, 10, 10, 10, 10}}, {1, {10, 20, 20, 20, 10}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {32}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..3aca9fd
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_quant8_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{2, {4}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 32, 16}}, {1, {2}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 17, 9}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/lsh_projection.example.cpp b/nn/runtime/test/generated/examples/lsh_projection.example.cpp
index 90b6a85..4996fff 100644
--- a/nn/runtime/test/generated/examples/lsh_projection.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection.example.cpp
@@ -4,9 +4,9 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f}}, {2, {0.12f, 0.34f, 0.56f}}},
+  {{1, {0.12f, 0.34f, 0.56f}}},
   // int -> INT32 map
-  {{1, {12345, 54321, 67890, 9876, -12345678, -87654321}}, {3, {2}}},
+  {{0, {12345, 54321, 67890, 9876, -12345678, -87654321}}},
   // int -> QUANT8_ASYMM map
   {}
 },
@@ -20,24 +20,3 @@
   {}
 }
 }, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f}}, {2, {}}},
-  // int -> INT32 map
-  {{1, {12345, 54321, 67890, 9876, -12345678, -87654321}}, {3, {1}}},
-  // int -> QUANT8_ASYMM map
-  {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 2, 2, 0}}},
-  // int -> QUANT8_ASYMM map
-  {}
-}
-}, // End of an example
diff --git a/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp
new file mode 100644
index 0000000..aa9de45
--- /dev/null
+++ b/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: lsh_projection_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{1, {}}},
+  // int -> INT32 map
+  {{0, {12345, 54321, 67890, 9876, -12345678, -87654321}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 2, 0}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..cc9bd06
--- /dev/null
+++ b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: lsh_projection_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f}}, {2, {0.12f, 0.34f, 0.56f}}},
+  // int -> INT32 map
+  {{1, {12345, 54321, 67890, 9876, -12345678, -87654321}}, {3, {2}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 0, 1, 1, 1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/reshape.example.cpp b/nn/runtime/test/generated/examples/reshape.example.cpp
index 400d1eb..054b2f4 100644
--- a/nn/runtime/test/generated/examples/reshape.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape.example.cpp
@@ -6,7 +6,7 @@
   // int -> FLOAT32 map
   {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}},
   // int -> INT32 map
-  {{1, {-1}}},
+  {},
   // int -> QUANT8_ASYMM map
   {}
 },
diff --git a/nn/runtime/test/generated/examples/reshape_quant8.example.cpp b/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
index 5942706..e959af7 100644
--- a/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
@@ -6,7 +6,7 @@
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{1, {-1}}},
+  {},
   // int -> QUANT8_ASYMM map
   {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}}
 },
diff --git a/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..40260cf
--- /dev/null
+++ b/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_quant8_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {-1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp
new file mode 100644
index 0000000..d97b03e
--- /dev/null
+++ b/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_weights_as_inputs.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}},
+  // int -> INT32 map
+  {{1, {-1}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/models/conv_float.model.cpp b/nn/runtime/test/generated/models/conv_float.model.cpp
index b179b3b..0b97e31 100644
--- a/nn/runtime/test/generated/models/conv_float.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float.model.cpp
@@ -13,6 +13,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type1);
   // Phase 2, operations
+  static float op2_init[] = {0.25f, 0.25f, 0.25f, 0.25f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 4);
+  static float op3_init[] = {0.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 1);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -22,7 +26,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_float_channels.model.cpp b/nn/runtime/test/generated/models/conv_float_channels.model.cpp
index bbcfd29..05321fb 100644
--- a/nn/runtime/test/generated/models/conv_float_channels.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float_channels.model.cpp
@@ -13,6 +13,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type0);
   // Phase 2, operations
+  static float op2_init[] = {1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 9);
+  static float op3_init[] = {0.0f, 0.0f, 0.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 3);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -22,7 +26,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..b3e7da0
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_channels_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 3});
+  OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
+  OperandType type2(Type::TENSOR_FLOAT32, {3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_float_large.model.cpp b/nn/runtime/test/generated/models/conv_float_large.model.cpp
index c629c06..165b3bb 100644
--- a/nn/runtime/test/generated/models/conv_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float_large.model.cpp
@@ -1,7 +1,6 @@
 // Generated file (from: conv_float_large.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type3(Type::INT32, {});
-  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 3});
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
   OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
   OperandType type2(Type::TENSOR_FLOAT32, {3});
@@ -12,8 +11,12 @@
   auto pad0 = model->addOperand(&type3);
   auto act = model->addOperand(&type3);
   auto stride = model->addOperand(&type3);
-  auto op4 = model->addOperand(&type4);
+  auto op4 = model->addOperand(&type0);
   // Phase 2, operations
+  static float op2_init[] = {1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f, 3.0f, 6.0f, 9.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 9);
+  static float op3_init[] = {0.0f, 0.0f, 0.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 3);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -23,7 +26,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d3a2b44
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+  OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
+  OperandType type2(Type::TENSOR_FLOAT32, {3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..f4a1e2d
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8.model.cpp b/nn/runtime/test/generated/models/conv_quant8.model.cpp
index 46a62a3..81e31be 100644
--- a/nn/runtime/test/generated/models/conv_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8.model.cpp
@@ -14,6 +14,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type4);
   // Phase 2, operations
+  static uint8_t op2_init[] = {2, 2, 2, 2};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 4);
+  static int32_t op3_init[] = {4};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp b/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
index 04b9bed..cbd3927 100644
--- a/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
@@ -14,6 +14,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type4);
   // Phase 2, operations
+  static uint8_t op2_init[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+  static int32_t op3_init[] = {0, 0, 0};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..56f5fbd
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_channels_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 1.0, 0);
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_large.model.cpp b/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
index 7bd13d6..d1a97ca 100644
--- a/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
@@ -14,6 +14,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type4);
   // Phase 2, operations
+  static uint8_t op2_init[] = {1, 4, 7, 2, 5, 8, 3, 6, 9};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+  static int32_t op3_init[] = {0, 0, 0};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..e98c4ca
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0);
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp b/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
index 960233a..dca8a0e 100644
--- a/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
@@ -14,6 +14,10 @@
   auto stride = model->addOperand(&type3);
   auto op4 = model->addOperand(&type4);
   // Phase 2, operations
+  static uint8_t op2_init[] = {10, 40, 70, 20, 50, 80, 30, 60, 90};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+  static int32_t op3_init[] = {0, 0, 0};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
   model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..8a3155f
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_overflow_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0);
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..76d13a6
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {1}, 0.25f, 0);
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 1}, 0.5f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
index 791cb75..4d8aae8 100644
--- a/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
@@ -2,7 +2,7 @@
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.5f, 0);
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2});
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
   auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
index bfcabcd..ace293d 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
@@ -1,30 +1,35 @@
 // Generated file (from: depthwise_conv2d_float.mod.py). Do not edit
 void CreateModel(Model *model) {
-  OperandType type2(Type::INT32, {});
-  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type1(Type::TENSOR_FLOAT32, {2});
+  OperandType type3(Type::INT32, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4});
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto op3 = model->addOperand(&type1);
-  auto pad0 = model->addOperand(&type2);
-  auto act = model->addOperand(&type2);
-  auto stride = model->addOperand(&type2);
-  auto channelMultiplier = model->addOperand(&type2);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto channelMultiplier = model->addOperand(&type3);
   auto op4 = model->addOperand(&type1);
   // Phase 2, operations
+  static float op2_init[] = {0.25f, 0.0f, 0.2f, 0.0f, 0.25f, 0.0f, 0.0f, 0.3f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.1f, 0.0f, 0.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 16);
+  static float op3_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 4);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
   static int32_t stride_init[] = {1};
   model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
-  static int32_t channelMultiplier_init[] = {1};
+  static int32_t channelMultiplier_init[] = {2};
   model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
index 9a9852a..98f40e3 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
@@ -1,19 +1,23 @@
 // Generated file (from: depthwise_conv2d_float_large.mod.py). Do not edit
 void CreateModel(Model *model) {
-  OperandType type3(Type::INT32, {});
-  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
-  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {2});
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type2);
-  auto pad0 = model->addOperand(&type3);
-  auto act = model->addOperand(&type3);
-  auto stride = model->addOperand(&type3);
-  auto channelMultiplier = model->addOperand(&type3);
-  auto op4 = model->addOperand(&type2);
+  auto op2 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type2);
+  auto act = model->addOperand(&type2);
+  auto stride = model->addOperand(&type2);
+  auto channelMultiplier = model->addOperand(&type2);
+  auto op4 = model->addOperand(&type3);
   // Phase 2, operations
+  static float op2_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 8);
+  static float op3_init[] = {100.0f, 200.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 2);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
   model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
index 740f500..521eb3f 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
@@ -1,19 +1,23 @@
 // Generated file (from: depthwise_conv2d_float_large_2.mod.py). Do not edit
 void CreateModel(Model *model) {
-  OperandType type3(Type::INT32, {});
-  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
-  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
-  OperandType type2(Type::TENSOR_FLOAT32, {4});
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+  OperandType type1(Type::TENSOR_FLOAT32, {4});
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type2);
-  auto pad0 = model->addOperand(&type3);
-  auto act = model->addOperand(&type3);
-  auto stride = model->addOperand(&type3);
-  auto channelMultiplier = model->addOperand(&type3);
-  auto op4 = model->addOperand(&type2);
+  auto op2 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type2);
+  auto act = model->addOperand(&type2);
+  auto stride = model->addOperand(&type2);
+  auto channelMultiplier = model->addOperand(&type2);
+  auto op4 = model->addOperand(&type3);
   // Phase 2, operations
+  static float op2_init[] = {0.25f, 0.0f, 10.0f, 100.0f, 0.25f, 1.0f, 20.0f, 100.0f, 0.25f, 0.0f, 30.0f, 100.0f, 0.25f, 1.0f, 40.0f, 100.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 16);
+  static float op3_init[] = {600000.0f, 700000.0f, 800000.0f, 900000.0f};
+  model->setOperandValue(op3, op3_init, sizeof(float) * 4);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
   model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..19de705
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
@@ -0,0 +1,37 @@
+// Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 4});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+  OperandType type2(Type::TENSOR_FLOAT32, {4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto channelMultiplier = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  static int32_t channelMultiplier_init[] = {1};
+  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..80bf5b1
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,37 @@
+// Generated file (from: depthwise_conv2d_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
+  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto channelMultiplier = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  static int32_t channelMultiplier_init[] = {1};
+  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d45063c
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto pad0 = model->addOperand(&type3);
+  auto act = model->addOperand(&type3);
+  auto stride = model->addOperand(&type3);
+  auto channelMultiplier = model->addOperand(&type3);
+  auto op4 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  static int32_t channelMultiplier_init[] = {2};
+  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
index 7f2b4fc..5a6932f 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
@@ -3,7 +3,7 @@
   OperandType type2(Type::INT32, {});
   OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
-  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.f, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto op2 = model->addOperand(&type0);
@@ -14,6 +14,10 @@
   auto channelMultiplier = model->addOperand(&type2);
   auto op4 = model->addOperand(&type3);
   // Phase 2, operations
+  static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
+  static int32_t op3_init[] = {0, 0};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
   model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
index 4060e4c..dbabdf5 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
@@ -2,8 +2,8 @@
 void CreateModel(Model *model) {
   OperandType type2(Type::INT32, {});
   OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
-  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.f, 0);
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto op2 = model->addOperand(&type0);
@@ -14,6 +14,10 @@
   auto channelMultiplier = model->addOperand(&type2);
   auto op4 = model->addOperand(&type3);
   // Phase 2, operations
+  static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
+  static int32_t op3_init[] = {0, 0};
+  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
   static int32_t pad0_init[] = {0};
   model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
   model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, op3},
+    {op1},
     {op4});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..b965949
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type2);
+  auto act = model->addOperand(&type2);
+  auto stride = model->addOperand(&type2);
+  auto channelMultiplier = model->addOperand(&type2);
+  auto op4 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  static int32_t channelMultiplier_init[] = {1};
+  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..959fed1
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type2);
+  auto act = model->addOperand(&type2);
+  auto stride = model->addOperand(&type2);
+  auto channelMultiplier = model->addOperand(&type2);
+  auto op4 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  static int32_t stride_init[] = {1};
+  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+  static int32_t channelMultiplier_init[] = {1};
+  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_float.model.cpp b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
index bae1fcd..598c4cf 100644
--- a/nn/runtime/test/generated/models/fully_connected_float.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
@@ -11,12 +11,16 @@
   auto op3 = model->addOperand(&type0);
   auto act = model->addOperand(&type3);
   // Phase 2, operations
+  static float op2_init[] = {2.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 1);
+  static float b0_init[] = {4.0f};
+  model->setOperandValue(b0, b0_init, sizeof(float) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, b0},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
index ab0ff4f..cb17f7b 100644
--- a/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
@@ -11,12 +11,16 @@
   auto op3 = model->addOperand(&type2);
   auto act = model->addOperand(&type3);
   // Phase 2, operations
+  static float op2_init[] = {2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 5);
+  static float b0_init[] = {900000.0f};
+  model->setOperandValue(b0, b0_init, sizeof(float) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, b0},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..2b1f586
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 5});
+  OperandType type1(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto b0 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto act = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..745eafa
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1});
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type0);
+  auto act = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
index 6311c1b..cdb9119 100644
--- a/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
@@ -12,12 +12,16 @@
   auto op3 = model->addOperand(&type3);
   auto act = model->addOperand(&type4);
   // Phase 2, operations
+  static uint8_t op2_init[] = {2};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 1);
+  static int32_t b0_init[] = {4};
+  model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, b0},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
index ec18a16..761f257 100644
--- a/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
@@ -11,12 +11,16 @@
   auto op3 = model->addOperand(&type2);
   auto act = model->addOperand(&type3);
   // Phase 2, operations
+  static uint8_t op2_init[] = {10, 20, 20, 20, 10};
+  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 5);
+  static int32_t b0_init[] = {10};
+  model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2, b0},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..5d70dc9
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1}, 0.04, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 1.f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 5}, 0.2, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto b0 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  auto act = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..ae0b2e1
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,28 @@
+// Generated file (from: fully_connected_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type4(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {1}, 0.25f, 0);
+  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 0.5f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 0.5f, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 1.f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  auto act = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, b0},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lsh_projection.model.cpp b/nn/runtime/test/generated/models/lsh_projection.model.cpp
index e445abd..d5c1357 100644
--- a/nn/runtime/test/generated/models/lsh_projection.model.cpp
+++ b/nn/runtime/test/generated/models/lsh_projection.model.cpp
@@ -1,10 +1,10 @@
 // Generated file (from: lsh_projection.mod.py). Do not edit
 void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
   OperandType type2(Type::TENSOR_FLOAT32, {3});
   OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
-  OperandType type3(Type::TENSOR_INT32, {1});
   OperandType type1(Type::TENSOR_INT32, {3, 2});
-  OperandType type4(Type::TENSOR_INT32, {4, 2});
+  OperandType type4(Type::TENSOR_INT32, {8});
   // Phase 1, operands
   auto hash = model->addOperand(&type0);
   auto lookup = model->addOperand(&type1);
@@ -12,10 +12,14 @@
   auto type_param = model->addOperand(&type3);
   auto output = model->addOperand(&type4);
   // Phase 2, operations
+  static float hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+  model->setOperandValue(hash, hash_init, sizeof(float) * 8);
+  static int32_t type_param_init[] = {2};
+  model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {hash, lookup, weight, type_param},
+    {lookup, weight},
     {output});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/lsh_projection_2.model.cpp b/nn/runtime/test/generated/models/lsh_projection_2.model.cpp
new file mode 100644
index 0000000..25e9d8b
--- /dev/null
+++ b/nn/runtime/test/generated/models/lsh_projection_2.model.cpp
@@ -0,0 +1,30 @@
+// Generated file (from: lsh_projection_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type3(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT32, {3});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
+  OperandType type1(Type::TENSOR_INT32, {3, 2});
+  OperandType type4(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto hash = model->addOperand(&type0);
+  auto lookup = model->addOperand(&type1);
+  auto weight = model->addOperand(&type2);
+  auto type_param = model->addOperand(&type3);
+  auto output = model->addOperand(&type4);
+  // Phase 2, operations
+  static float hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+  model->setOperandValue(hash, hash_init, sizeof(float) * 8);
+  static int32_t type_param_init[] = {1};
+  model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {lookup, weight},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d73645f
--- /dev/null
+++ b/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp
@@ -0,0 +1,26 @@
+// Generated file (from: lsh_projection_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_FLOAT32, {3});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
+  OperandType type3(Type::TENSOR_INT32, {1});
+  OperandType type1(Type::TENSOR_INT32, {3, 2});
+  OperandType type4(Type::TENSOR_INT32, {8});
+  // Phase 1, operands
+  auto hash = model->addOperand(&type0);
+  auto lookup = model->addOperand(&type1);
+  auto weight = model->addOperand(&type2);
+  auto type_param = model->addOperand(&type3);
+  auto output = model->addOperand(&type4);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {hash, lookup, weight, type_param},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lstm.model.cpp b/nn/runtime/test/generated/models/lstm.model.cpp
index e5baa17..528bc5f 100644
--- a/nn/runtime/test/generated/models/lstm.model.cpp
+++ b/nn/runtime/test/generated/models/lstm.model.cpp
@@ -2,8 +2,8 @@
 void CreateModel(Model *model) {
   OperandType type5(Type::TENSOR_FLOAT32, {0,0});
   OperandType type3(Type::TENSOR_FLOAT32, {0});
+  OperandType type9(Type::TENSOR_FLOAT32, {1, 16});
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
-  OperandType type9(Type::TENSOR_FLOAT32, {1, 4, 4});
   OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
   OperandType type8(Type::TENSOR_FLOAT32, {1});
   OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
@@ -39,11 +39,11 @@
   auto cell_state_out = model->addOperand(&type6);
   auto output = model->addOperand(&type6);
   // Phase 2, operations
-  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
-    {output_state_out, cell_state_out, output, scratch_buffer});
+    {scratch_buffer, output_state_out, cell_state_out, output});
   assert(model->isValid());
 }
 
diff --git a/nn/runtime/test/generated/models/lstm2.model.cpp b/nn/runtime/test/generated/models/lstm2.model.cpp
index 14d7baf..4286acd 100644
--- a/nn/runtime/test/generated/models/lstm2.model.cpp
+++ b/nn/runtime/test/generated/models/lstm2.model.cpp
@@ -2,8 +2,8 @@
 void CreateModel(Model *model) {
   OperandType type5(Type::TENSOR_FLOAT32, {0,0});
   OperandType type3(Type::TENSOR_FLOAT32, {0});
+  OperandType type9(Type::TENSOR_FLOAT32, {1, 12});
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
-  OperandType type9(Type::TENSOR_FLOAT32, {1, 4, 4});
   OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
   OperandType type8(Type::TENSOR_FLOAT32, {1});
   OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
@@ -39,11 +39,11 @@
   auto cell_state_out = model->addOperand(&type6);
   auto output = model->addOperand(&type6);
   // Phase 2, operations
-  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
-    {output_state_out, cell_state_out, output, scratch_buffer});
+    {scratch_buffer, output_state_out, cell_state_out, output});
   assert(model->isValid());
 }
 
diff --git a/nn/runtime/test/generated/models/lstm3.model.cpp b/nn/runtime/test/generated/models/lstm3.model.cpp
index 16f0662..64b0056 100644
--- a/nn/runtime/test/generated/models/lstm3.model.cpp
+++ b/nn/runtime/test/generated/models/lstm3.model.cpp
@@ -4,9 +4,9 @@
   OperandType type4(Type::TENSOR_FLOAT32, {16,20});
   OperandType type9(Type::TENSOR_FLOAT32, {1});
   OperandType type6(Type::TENSOR_FLOAT32, {2, 16});
-  OperandType type10(Type::TENSOR_FLOAT32, {2, 20, 4});
   OperandType type7(Type::TENSOR_FLOAT32, {2, 20});
   OperandType type0(Type::TENSOR_FLOAT32, {2, 5});
+  OperandType type10(Type::TENSOR_FLOAT32, {2, 80});
   OperandType type2(Type::TENSOR_FLOAT32, {20, 16});
   OperandType type1(Type::TENSOR_FLOAT32, {20, 5});
   OperandType type3(Type::TENSOR_FLOAT32, {20});
@@ -40,11 +40,11 @@
   auto cell_state_out = model->addOperand(&type7);
   auto output = model->addOperand(&type6);
   // Phase 2, operations
-  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
-    {output_state_out, cell_state_out, output, scratch_buffer});
+    {scratch_buffer, output_state_out, cell_state_out, output});
   assert(model->isValid());
 }
 
diff --git a/nn/runtime/test/generated/models/reshape.model.cpp b/nn/runtime/test/generated/models/reshape.model.cpp
index ec856e2..e7efea3 100644
--- a/nn/runtime/test/generated/models/reshape.model.cpp
+++ b/nn/runtime/test/generated/models/reshape.model.cpp
@@ -8,10 +8,12 @@
   auto op2 = model->addOperand(&type1);
   auto op3 = model->addOperand(&type2);
   // Phase 2, operations
+  static int32_t op2_init[] = {-1};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/reshape_quant8.model.cpp b/nn/runtime/test/generated/models/reshape_quant8.model.cpp
index afaf5bb..cb89320 100644
--- a/nn/runtime/test/generated/models/reshape_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/reshape_quant8.model.cpp
@@ -8,10 +8,12 @@
   auto op2 = model->addOperand(&type1);
   auto op3 = model->addOperand(&type2);
   // Phase 2, operations
+  static int32_t op2_init[] = {-1};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 1);
   model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
-    {op1, op2},
+    {op1},
     {op3});
   assert(model->isValid());
 }
diff --git a/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..9f6dafd
--- /dev/null
+++ b/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3, 3}, 1.f, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {9}, 1.f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..cf4396a
--- /dev/null
+++ b/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
+  OperandType type2(Type::TENSOR_FLOAT32, {9});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
index 8f0dbad..92d080b 100644
--- a/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
@@ -1,7 +1,7 @@
 // Generated file (from: space_to_depth_quant8_1.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8});
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.5f, 0);
   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
diff --git a/nn/runtime/test/generated/models/svdf.model.cpp b/nn/runtime/test/generated/models/svdf.model.cpp
index 41533c2..54b1410 100644
--- a/nn/runtime/test/generated/models/svdf.model.cpp
+++ b/nn/runtime/test/generated/models/svdf.model.cpp
@@ -1,7 +1,7 @@
 // Generated file (from: svdf.mod.py). Do not edit
 void CreateModel(Model *model) {
-  OperandType type4(Type::TENSOR_FLOAT32, {2, 36});
   OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
   OperandType type6(Type::TENSOR_FLOAT32, {2, 4});
   OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
   OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
@@ -18,11 +18,11 @@
   auto state_out = model->addOperand(&type4);
   auto output = model->addOperand(&type6);
   // Phase 2, operations
-  model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {output, state_out});
+  model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param},
-    {output, state_out});
+    {state_out, output});
   assert(model->isValid());
 }
 
diff --git a/nn/runtime/test/generated/vts_models/conv_float.model.cpp b/nn/runtime/test/generated/vts_models/conv_float.model.cpp
index e23133a..be37301 100644
--- a/nn/runtime/test/generated/vts_models/conv_float.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_float.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_float_channels.model.cpp b/nn/runtime/test/generated/vts_models/conv_float_channels.model.cpp
index 0a2cac0..3495355 100644
--- a/nn/runtime/test/generated/vts_models/conv_float_channels.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_float_channels.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 36},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 12},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 0, 64, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 64, 64, 0, 0, 64, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_float_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_float_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..0a2cac0
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_float_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_float_large.model.cpp b/nn/runtime/test/generated/vts_models/conv_float_large.model.cpp
index 08b8645..e01baf5 100644
--- a/nn/runtime/test/generated/vts_models/conv_float_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_float_large.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 36},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 12},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,11 +54,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 1, 1, 3},
+            .dimensions = {1, 2, 3, 3},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 63, 0, 0, 128, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 160, 64, 0, 0, 0, 65, 0, 0, 64, 64, 0, 0, 192, 64, 0, 0, 16, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..9e4803f
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..e23133a
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_float_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8.model.cpp
index 8480ba8..7741db9 100644
--- a/nn/runtime/test/generated/vts_models/conv_quant8.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_quant8.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,24 +26,6 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
             .location = {.poolIndex = 0, .offset = 4, .length = 4},
         },
@@ -57,6 +39,24 @@
             .location = {.poolIndex = 0, .offset = 8, .length = 4},
         },
         {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 1},
             .numberOfConsumers = 0,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      2, 2, 2, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_channels.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_channels.model.cpp
index cdb903d..9b77838 100644
--- a/nn/runtime/test/generated/vts_models/conv_quant8_channels.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_channels.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 9},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 9, .length = 12},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 21, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 25, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 29, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..cdb903d
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 3},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_large.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_large.model.cpp
index 25896a6..261c759 100644
--- a/nn/runtime/test/generated/vts_models/conv_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_large.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 9},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 9, .length = 12},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 21, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 25, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 29, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      1, 4, 7, 2, 5, 8, 3, 6, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..25896a6
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_overflow.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_overflow.model.cpp
index 25896a6..fdd3978 100644
--- a/nn/runtime/test/generated/vts_models/conv_quant8_overflow.model.cpp
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_overflow.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 9},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 9, .length = 12},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 21, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 25, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 29, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -75,10 +75,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {6};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+      10, 40, 70, 20, 50, 80, 30, 60, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_overflow_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_overflow_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..25896a6
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_overflow_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 1, 1, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/conv_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/conv_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..8480ba8
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/conv_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,98 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 3, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 4},
+            .outputs = {6},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {6};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_1.model.cpp b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_1.model.cpp
index 9735bab..feb5c19 100644
--- a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_1.model.cpp
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 2},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float.model.cpp
index 09cdd57..22ffa9f 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float.model.cpp
@@ -4,7 +4,7 @@
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 2},
+            .dimensions = {1, 3, 3, 2},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -13,21 +13,21 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 2},
+            .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 64},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2},
+            .dimensions = {4},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 16},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 80, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 84, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 88, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -63,11 +63,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+            .location = {.poolIndex = 0, .offset = 92, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2},
+            .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -84,10 +84,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {7};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 62, 0, 0, 0, 0, 205, 204, 76, 62, 0, 0, 0, 0, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 0, 0, 154, 153, 153, 62, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 62, 205, 204, 204, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large.model.cpp
index c60c173..50c6a10 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large.model.cpp
@@ -4,7 +4,7 @@
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 3},
+            .dimensions = {1, 2, 2, 2},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -63,11 +63,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2},
+            .dimensions = {1, 1, 1, 2},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -84,10 +84,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {7};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 200, 66, 0, 0, 72, 67, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2.model.cpp
index d573770..ef5906b 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2.model.cpp
@@ -4,7 +4,7 @@
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 3},
+            .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 64},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 16},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 80, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 84, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 88, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -63,11 +63,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+            .location = {.poolIndex = 0, .offset = 92, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {4},
+            .dimensions = {1, 1, 1, 4},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -84,10 +84,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {7};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+      0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 160, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 240, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 32, 66, 0, 0, 200, 66, 0, 124, 18, 73, 0, 230, 42, 73, 0, 80, 67, 73, 0, 186, 91, 73, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..a2adfba
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
@@ -0,0 +1,107 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 4},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..aa34efc
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,107 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 4},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..50877dc
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_weights_as_inputs.model.cpp
@@ -0,0 +1,107 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 4},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8.model.cpp
index 594db01..7396b99 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -63,11 +63,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2},
+            .dimensions = {1, 1, 1, 2},
             .numberOfConsumers = 0,
             .scale = 1.0f,
             .zeroPoint = 0,
@@ -84,10 +84,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {7};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+      2, 4, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large.model.cpp
index 594db01..4c1a86d 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
         },
         {
             .type = OperandType::INT32,
@@ -36,7 +36,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -54,7 +54,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
         },
         {
             .type = OperandType::INT32,
@@ -63,11 +63,11 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2},
+            .dimensions = {1, 1, 1, 2},
             .numberOfConsumers = 0,
             .scale = 1.0f,
             .zeroPoint = 0,
@@ -84,10 +84,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {7};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+      2, 4, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..be0b691
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,107 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 4},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d232eb9
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,107 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 4},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float.model.cpp
index 78e2cc9..e0118b9 100644
--- a/nn/runtime/test/generated/vts_models/fully_connected_float.model.cpp
+++ b/nn/runtime/test/generated/vts_models/fully_connected_float.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
         }
     };
 
@@ -57,10 +57,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {3};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float_large.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float_large.model.cpp
index d2f9495..5db446d 100644
--- a/nn/runtime/test/generated/vts_models/fully_connected_float_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/fully_connected_float_large.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 20},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
         }
     };
 
@@ -57,10 +57,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {3};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 186, 91, 73, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d2f9495
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/fully_connected_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,80 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::FULLY_CONNECTED,
+            .inputs = {0, 1, 2, 4},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..78e2cc9
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/fully_connected_float_weights_as_inputs.model.cpp
@@ -0,0 +1,80 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::FULLY_CONNECTED,
+            .inputs = {0, 1, 2, 4},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_quant8.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_quant8.model.cpp
index 1fa69cd..f9d7422 100644
--- a/nn/runtime/test/generated/vts_models/fully_connected_quant8.model.cpp
+++ b/nn/runtime/test/generated/vts_models/fully_connected_quant8.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.5f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 1},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.25f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 1, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 5, .length = 4},
         }
     };
 
@@ -57,10 +57,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {3};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      2, 4, 0, 0, 0, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_quant8_large.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_quant8_large.model.cpp
index 274621a..9ec84fc 100644
--- a/nn/runtime/test/generated/vts_models/fully_connected_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/vts_models/fully_connected_quant8_large.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.2f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 5},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -26,8 +26,8 @@
             .numberOfConsumers = 1,
             .scale = 0.04f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 5, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -45,7 +45,7 @@
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 9, .length = 4},
         }
     };
 
@@ -57,10 +57,10 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {3};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      10, 20, 20, 20, 10, 10, 0, 0, 0, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..274621a
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/fully_connected_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,80 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.2f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.2f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.04f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::FULLY_CONNECTED,
+            .inputs = {0, 1, 2, 4},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..1fa69cd
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/fully_connected_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,80 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 1},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::FULLY_CONNECTED,
+            .inputs = {0, 1, 2, 4},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/lsh_projection.model.cpp b/nn/runtime/test/generated/vts_models/lsh_projection.model.cpp
index 3c6b709..f2fcfb4 100644
--- a/nn/runtime/test/generated/vts_models/lsh_projection.model.cpp
+++ b/nn/runtime/test/generated/vts_models/lsh_projection.model.cpp
@@ -8,8 +8,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
         },
         {
             .type = OperandType::TENSOR_INT32,
@@ -30,17 +30,17 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {1},
+            .type = OperandType::INT32,
+            .dimensions = {},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
         },
         {
             .type = OperandType::TENSOR_INT32,
-            .dimensions = {4, 2},
+            .dimensions = {8},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -57,9 +57,11 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3};
+    const std::vector<uint32_t> inputIndexes = {1, 2};
     const std::vector<uint32_t> outputIndexes = {4};
-    std::vector<uint8_t> operandValues = {};
+    std::vector<uint8_t> operandValues = {
+      109, 231, 251, 61, 213, 120, 233, 62, 29, 90, 164, 190, 139, 108, 39, 191, 182, 243, 157, 63, 45, 178, 181, 64, 162, 69, 138, 192, 113, 61, 12, 193, 2, 0, 0, 0
+    };
     const std::vector<hidl_memory> pools = {};
 
     return {
diff --git a/nn/runtime/test/generated/vts_models/lsh_projection_2.model.cpp b/nn/runtime/test/generated/vts_models/lsh_projection_2.model.cpp
new file mode 100644
index 0000000..1aa040a
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/lsh_projection_2.model.cpp
@@ -0,0 +1,80 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::LSH_PROJECTION,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {1, 2};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      109, 231, 251, 61, 213, 120, 233, 62, 29, 90, 164, 190, 139, 108, 39, 191, 182, 243, 157, 63, 45, 178, 181, 64, 162, 69, 138, 192, 113, 61, 12, 193, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/lsh_projection_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/lsh_projection_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..b088432
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/lsh_projection_weights_as_inputs.model.cpp
@@ -0,0 +1,78 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {8},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::LSH_PROJECTION,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/lstm.model.cpp b/nn/runtime/test/generated/vts_models/lstm.model.cpp
index 61a1968..33d890b 100644
--- a/nn/runtime/test/generated/vts_models/lstm.model.cpp
+++ b/nn/runtime/test/generated/vts_models/lstm.model.cpp
@@ -211,7 +211,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 4, 4},
+            .dimensions = {1, 16},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -251,12 +251,12 @@
         {
             .type = OperationType::LSTM,
             .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22},
-            .outputs = {24, 25, 26, 23},
+            .outputs = {23, 24, 25, 26},
         }
     };
 
     const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
-    const std::vector<uint32_t> outputIndexes = {24, 25, 26, 23};
+    const std::vector<uint32_t> outputIndexes = {23, 24, 25, 26};
     std::vector<uint8_t> operandValues = {};
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/lstm2.model.cpp b/nn/runtime/test/generated/vts_models/lstm2.model.cpp
index 6634acf..95fc7ca 100644
--- a/nn/runtime/test/generated/vts_models/lstm2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/lstm2.model.cpp
@@ -211,7 +211,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 4, 4},
+            .dimensions = {1, 12},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -251,12 +251,12 @@
         {
             .type = OperationType::LSTM,
             .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22},
-            .outputs = {24, 25, 26, 23},
+            .outputs = {23, 24, 25, 26},
         }
     };
 
     const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
-    const std::vector<uint32_t> outputIndexes = {24, 25, 26, 23};
+    const std::vector<uint32_t> outputIndexes = {23, 24, 25, 26};
     std::vector<uint8_t> operandValues = {};
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/lstm3.model.cpp b/nn/runtime/test/generated/vts_models/lstm3.model.cpp
index ed70a33..565772f 100644
--- a/nn/runtime/test/generated/vts_models/lstm3.model.cpp
+++ b/nn/runtime/test/generated/vts_models/lstm3.model.cpp
@@ -211,7 +211,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 20, 4},
+            .dimensions = {2, 80},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -251,12 +251,12 @@
         {
             .type = OperationType::LSTM,
             .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22},
-            .outputs = {24, 25, 26, 23},
+            .outputs = {23, 24, 25, 26},
         }
     };
 
     const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
-    const std::vector<uint32_t> outputIndexes = {24, 25, 26, 23};
+    const std::vector<uint32_t> outputIndexes = {23, 24, 25, 26};
     std::vector<uint8_t> operandValues = {};
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/reshape.model.cpp b/nn/runtime/test/generated/vts_models/reshape.model.cpp
index 7a3296e..0a5e578 100644
--- a/nn/runtime/test/generated/vts_models/reshape.model.cpp
+++ b/nn/runtime/test/generated/vts_models/reshape.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -39,9 +39,11 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
     const std::vector<hidl_memory> pools = {};
 
     return {
diff --git a/nn/runtime/test/generated/vts_models/reshape_quant8.model.cpp b/nn/runtime/test/generated/vts_models/reshape_quant8.model.cpp
index 8a44723..9ee7363 100644
--- a/nn/runtime/test/generated/vts_models/reshape_quant8.model.cpp
+++ b/nn/runtime/test/generated/vts_models/reshape_quant8.model.cpp
@@ -17,8 +17,8 @@
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
         },
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
@@ -39,9 +39,11 @@
         }
     };
 
-    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
     const std::vector<hidl_memory> pools = {};
 
     return {
diff --git a/nn/runtime/test/generated/vts_models/reshape_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/reshape_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..8a44723
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/reshape_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,60 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {9},
+            .numberOfConsumers = 0,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESHAPE,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/reshape_weights_as_inputs.model.cpp b/nn/runtime/test/generated/vts_models/reshape_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..7a3296e
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/reshape_weights_as_inputs.model.cpp
@@ -0,0 +1,60 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {9},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESHAPE,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_1.model.cpp b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_1.model.cpp
index 3bf7fd2..f1c1f9a 100644
--- a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_1.model.cpp
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 1, 1, 8},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/generated/vts_models/svdf.model.cpp b/nn/runtime/test/generated/vts_models/svdf.model.cpp
index 2fa8fbd..87f5251 100644
--- a/nn/runtime/test/generated/vts_models/svdf.model.cpp
+++ b/nn/runtime/test/generated/vts_models/svdf.model.cpp
@@ -40,7 +40,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 36},
+            .dimensions = {2, 40},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -67,7 +67,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 36},
+            .dimensions = {2, 40},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -89,12 +89,12 @@
         {
             .type = OperationType::SVDF,
             .inputs = {0, 1, 2, 3, 4, 5, 6},
-            .outputs = {8, 7},
+            .outputs = {7, 8},
         }
     };
 
     const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4, 5, 6};
-    const std::vector<uint32_t> outputIndexes = {8, 7};
+    const std::vector<uint32_t> outputIndexes = {7, 8};
     std::vector<uint8_t> operandValues = {};
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/specs/conv_float_large.mod.py b/nn/runtime/test/specs/conv_float_large.mod.py
index 32c6832..febe125 100644
--- a/nn/runtime/test/specs/conv_float_large.mod.py
+++ b/nn/runtime/test/specs/conv_float_large.mod.py
@@ -23,7 +23,7 @@
 stride = Int32Scalar("stride", 1)
 # output dimension:
 #     (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
 
 model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
 
diff --git a/nn/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py b/nn/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py
index aeed75a..cc0eaa9 100644
--- a/nn/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py
+++ b/nn/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py
@@ -23,7 +23,7 @@
 stride = Int32Scalar("stride", 1)
 # output dimension:
 #     (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
 
 model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
 
diff --git a/nn/runtime/test/specs/depth_to_space_quant8_1.mod.py b/nn/runtime/test/specs/depth_to_space_quant8_1.mod.py
index c9414a8..6ae42f4 100644
--- a/nn/runtime/test/specs/depth_to_space_quant8_1.mod.py
+++ b/nn/runtime/test/specs/depth_to_space_quant8_1.mod.py
@@ -1,7 +1,7 @@
 model = Model()
 i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 8}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
 
 model = model.Operation("DEPTH_TO_SPACE", i1, block).To(output)
 
diff --git a/nn/runtime/test/specs/depthwise_conv2d_float_large.mod.py b/nn/runtime/test/specs/depthwise_conv2d_float_large.mod.py
index 38d1865..9e8b297 100644
--- a/nn/runtime/test/specs/depthwise_conv2d_float_large.mod.py
+++ b/nn/runtime/test/specs/depthwise_conv2d_float_large.mod.py
@@ -15,7 +15,7 @@
 #
 
 model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # depth_in = 2
 f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}", [.25, 0, .25, 1, .25, 0, .25, 1]) # depth_out = 2
 b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [100, 200]) # depth_out = 2
 pad0 = Int32Scalar("pad0", 0)
@@ -33,8 +33,8 @@
 # Example 1. Input in operand 0,
 input0 = {
     i1: [ # input 0
-     10, 21, 100, 10, 22, 200,
-     10, 23, 300, 10, 24, 400]
+     10, 21, 10, 22,
+     10, 23, 10, 24],
   }
 # (i1 (conv) f1) + b1
 output0 = {output: # output 0
diff --git a/nn/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py b/nn/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
index a5ad8f6..416279a 100644
--- a/nn/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
+++ b/nn/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
@@ -15,7 +15,7 @@
 #
 
 model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_in = 4
 f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, 10, 100, .25, 1, 20, 100, .25, 0, 30, 100, .25, 1, 40, 100]) # depth_out = 4
 b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [600000, 700000, 800000, 900000]) # depth_out = 4
 pad0 = Int32Scalar("pad0", 0)
@@ -33,10 +33,10 @@
 # Example 1. Input in operand 0,
 input0 = {
     i1: [ # input 0
-     10, 21, 100,
-     10, 22, 200,
-     10, 23, 300,
-     10, 24, 400]
+     10, 21, 100, 0,
+     10, 22, 200, 0,
+     10, 23, 300, 0,
+     10, 24, 400, 0],
   }
 # (i1 (conv) f1) + b1
 output0 = {output: # output 0
diff --git a/nn/runtime/test/specs/lsh_projection.mod.py b/nn/runtime/test/specs/lsh_projection.mod.py
index cc46b83..50dfcd4 100644
--- a/nn/runtime/test/specs/lsh_projection.mod.py
+++ b/nn/runtime/test/specs/lsh_projection.mod.py
@@ -25,7 +25,7 @@
 lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
 weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
 type_param = Int32Scalar("type_param", 2)  # DENSE
-output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash * num_bits))
 model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
                         type_param).To(output)
 
@@ -37,3 +37,4 @@
 output0 = {output: [1, 1, 1, 0, 1, 1, 1, 0]}
 
 Example((input0, output0))
+
diff --git a/nn/runtime/test/specs/lsh_projection_2.mod.py b/nn/runtime/test/specs/lsh_projection_2.mod.py
index b39d8bb..a297db2 100644
--- a/nn/runtime/test/specs/lsh_projection_2.mod.py
+++ b/nn/runtime/test/specs/lsh_projection_2.mod.py
@@ -25,7 +25,7 @@
 lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
 weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
 type_param = Int32Scalar("type_param", 1)  # SPARSE
-output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash))
 model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
                         type_param).To(output)
 
diff --git a/nn/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py b/nn/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py
index 5a3eff1..85a57ad 100644
--- a/nn/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py
+++ b/nn/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py
@@ -24,7 +24,7 @@
 lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
 weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
 type_param = Input("type_param", "TENSOR_INT32", "{1}")
-output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash * num_bits))
 model = model.Operation("LSH_PROJECTION", hhash, lookup, weight, type_param).To(output)
 
 input0 = {lookup:  [12345, 54321, 67890, 9876, -12345678, -87654321],
@@ -37,14 +37,3 @@
 
 Example((input0, output0))
 
-# Omit weight, since this is a sparse projection, for which the optional weight
-# input should be left unset.
-input1 = {lookup:  [12345, 54321, 67890, 9876, -12345678, -87654321],
-          hhash: [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765],
-          weight: [],
-          type_param: [1],  # SPARSE
-          }
-
-output1 = {output: [1,2,2,0]}
-
-Example((input1, output1))
diff --git a/nn/runtime/test/specs/lstm.mod.py b/nn/runtime/test/specs/lstm.mod.py
index 4791c68..cb1bf60 100644
--- a/nn/runtime/test/specs/lstm.mod.py
+++ b/nn/runtime/test/specs/lstm.mod.py
@@ -55,7 +55,7 @@
 cell_clip_param = Input("cell_clip_param", "TENSOR_FLOAT32", "{1}")
 proj_clip_param = Input("proj_clip_param", "TENSOR_FLOAT32", "{1}")
 
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d, %d}" % (n_batch, n_cell, 4))
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4)))
 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
diff --git a/nn/runtime/test/specs/lstm2.mod.py b/nn/runtime/test/specs/lstm2.mod.py
index f5a219f..d5afb0c 100644
--- a/nn/runtime/test/specs/lstm2.mod.py
+++ b/nn/runtime/test/specs/lstm2.mod.py
@@ -55,7 +55,7 @@
 cell_clip_param = Input("cell_clip_param", "TENSOR_FLOAT32", "{1}")
 proj_clip_param = Input("proj_clip_param", "TENSOR_FLOAT32", "{1}")
 
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d, %d}" % (n_batch, n_cell, 4))
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3))
 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
diff --git a/nn/runtime/test/specs/lstm3.mod.py b/nn/runtime/test/specs/lstm3.mod.py
index 05d3f34..23e2b2e 100644
--- a/nn/runtime/test/specs/lstm3.mod.py
+++ b/nn/runtime/test/specs/lstm3.mod.py
@@ -55,7 +55,7 @@
 cell_clip_param = Input("cell_clip_param", "TENSOR_FLOAT32", "{1}")
 proj_clip_param = Input("proj_clip_param", "TENSOR_FLOAT32", "{1}")
 
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d, %d}" % (n_batch, n_cell, 4))
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4)))
 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
diff --git a/nn/runtime/test/specs/space_to_depth_quant8_1.mod.py b/nn/runtime/test/specs/space_to_depth_quant8_1.mod.py
index c0d56af..844a77f 100644
--- a/nn/runtime/test/specs/space_to_depth_quant8_1.mod.py
+++ b/nn/runtime/test/specs/space_to_depth_quant8_1.mod.py
@@ -1,7 +1,7 @@
 model = Model()
 i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 8}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 8}, 0.5f, 0")
 
 model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
 
diff --git a/nn/runtime/test/specs/svdf.mod.py b/nn/runtime/test/specs/svdf.mod.py
index 3a96fe3..0843ff1 100644
--- a/nn/runtime/test/specs/svdf.mod.py
+++ b/nn/runtime/test/specs/svdf.mod.py
@@ -25,10 +25,10 @@
 weights_feature = Input("weights_feature", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))
 weights_time = Input("weights_time", "TENSOR_FLOAT32", "{%d, %d}" % (units, memory_size))
 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units))
-state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, (memory_size-1)*units))
+state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
 rank_param = Input("rank_param", "TENSOR_INT32", "{1}")
 activation_param = Input("activation_param", "TENSOR_INT32", "{1}")
-state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, (memory_size-1)*units))
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
 
 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,