Force CpuExecutor to validate user-provided model output operands.
- For operands with OperandLifeTime::MODEL_OUTPUT, the dimensions,
type, and other meta-data must match the output Shape calculated
from the operation preparation step.
- Fix the ill-defined tests caught by the added validation.
- Incidental changes: generated more tests from test specs.
Bug: 67390841
Test: NeuralNetworksTests pass
Change-Id: I40d35db0f7a868feae773dbf7e12cf4bf5f5e275
diff --git a/nn/runtime/test/generated/models/conv_float.model.cpp b/nn/runtime/test/generated/models/conv_float.model.cpp
index b179b3b..0b97e31 100644
--- a/nn/runtime/test/generated/models/conv_float.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float.model.cpp
@@ -13,6 +13,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type1);
// Phase 2, operations
+ static float op2_init[] = {0.25f, 0.25f, 0.25f, 0.25f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 4);
+ static float op3_init[] = {0.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 1);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -22,7 +26,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_float_channels.model.cpp b/nn/runtime/test/generated/models/conv_float_channels.model.cpp
index bbcfd29..05321fb 100644
--- a/nn/runtime/test/generated/models/conv_float_channels.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float_channels.model.cpp
@@ -13,6 +13,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type0);
// Phase 2, operations
+ static float op2_init[] = {1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 9);
+ static float op3_init[] = {0.0f, 0.0f, 0.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 3);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -22,7 +26,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..b3e7da0
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_channels_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type0);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_float_large.model.cpp b/nn/runtime/test/generated/models/conv_float_large.model.cpp
index c629c06..165b3bb 100644
--- a/nn/runtime/test/generated/models/conv_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/conv_float_large.model.cpp
@@ -1,7 +1,6 @@
// Generated file (from: conv_float_large.mod.py). Do not edit
void CreateModel(Model *model) {
OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 3});
OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
OperandType type2(Type::TENSOR_FLOAT32, {3});
@@ -12,8 +11,12 @@
auto pad0 = model->addOperand(&type3);
auto act = model->addOperand(&type3);
auto stride = model->addOperand(&type3);
- auto op4 = model->addOperand(&type4);
+ auto op4 = model->addOperand(&type0);
// Phase 2, operations
+ static float op2_init[] = {1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f, 3.0f, 6.0f, 9.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 9);
+ static float op3_init[] = {0.0f, 0.0f, 0.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 3);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -23,7 +26,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d3a2b44
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {3, 1, 1, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type0);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..f4a1e2d
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_float_weights_as_inputs.model.cpp
@@ -0,0 +1,33 @@
+// Generated file (from: conv_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 1});
+ OperandType type2(Type::TENSOR_FLOAT32, {1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type1);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8.model.cpp b/nn/runtime/test/generated/models/conv_quant8.model.cpp
index 46a62a3..81e31be 100644
--- a/nn/runtime/test/generated/models/conv_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8.model.cpp
@@ -14,6 +14,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type4);
// Phase 2, operations
+ static uint8_t op2_init[] = {2, 2, 2, 2};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 4);
+ static int32_t op3_init[] = {4};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp b/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
index 04b9bed..cbd3927 100644
--- a/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_channels.model.cpp
@@ -14,6 +14,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type4);
// Phase 2, operations
+ static uint8_t op2_init[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+ static int32_t op3_init[] = {0, 0, 0};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..56f5fbd
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_channels_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_channels_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 0.5f, 0);
+ OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 1.0, 0);
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_large.model.cpp b/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
index 7bd13d6..d1a97ca 100644
--- a/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_large.model.cpp
@@ -14,6 +14,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type4);
// Phase 2, operations
+ static uint8_t op2_init[] = {1, 4, 7, 2, 5, 8, 3, 6, 9};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+ static int32_t op3_init[] = {0, 0, 0};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..e98c4ca
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0);
+ OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0);
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp b/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
index 960233a..dca8a0e 100644
--- a/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
+++ b/nn/runtime/test/generated/models/conv_quant8_overflow.model.cpp
@@ -14,6 +14,10 @@
auto stride = model->addOperand(&type3);
auto op4 = model->addOperand(&type4);
// Phase 2, operations
+ static uint8_t op2_init[] = {10, 40, 70, 20, 50, 80, 30, 60, 90};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 9);
+ static int32_t op3_init[] = {0, 0, 0};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 3);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -23,7 +27,7 @@
model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..8a3155f
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_overflow_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_overflow_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0);
+ OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0);
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..76d13a6
--- /dev/null
+++ b/nn/runtime/test/generated/models/conv_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: conv_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {1}, 0.25f, 0);
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.5f, 0);
+ OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.f, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 1}, 0.5f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
index 791cb75..4d8aae8 100644
--- a/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_1.model.cpp
@@ -2,7 +2,7 @@
void CreateModel(Model *model) {
OperandType type1(Type::INT32, {});
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.5f, 0);
- OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2});
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
// Phase 1, operands
auto input = model->addOperand(&type0);
auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
index bfcabcd..ace293d 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float.model.cpp
@@ -1,30 +1,35 @@
// Generated file (from: depthwise_conv2d_float.mod.py). Do not edit
void CreateModel(Model *model) {
- OperandType type2(Type::INT32, {});
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2});
+ OperandType type3(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
+ OperandType type2(Type::TENSOR_FLOAT32, {4});
// Phase 1, operands
auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type0);
- auto op3 = model->addOperand(&type1);
- auto pad0 = model->addOperand(&type2);
- auto act = model->addOperand(&type2);
- auto stride = model->addOperand(&type2);
- auto channelMultiplier = model->addOperand(&type2);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto channelMultiplier = model->addOperand(&type3);
auto op4 = model->addOperand(&type1);
// Phase 2, operations
+ static float op2_init[] = {0.25f, 0.0f, 0.2f, 0.0f, 0.25f, 0.0f, 0.0f, 0.3f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.1f, 0.0f, 0.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 16);
+ static float op3_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 4);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
static int32_t stride_init[] = {1};
model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
- static int32_t channelMultiplier_init[] = {1};
+ static int32_t channelMultiplier_init[] = {2};
model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
index 9a9852a..98f40e3 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large.model.cpp
@@ -1,19 +1,23 @@
// Generated file (from: depthwise_conv2d_float_large.mod.py). Do not edit
void CreateModel(Model *model) {
- OperandType type3(Type::INT32, {});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
- OperandType type2(Type::TENSOR_FLOAT32, {2});
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+ OperandType type1(Type::TENSOR_FLOAT32, {2});
// Phase 1, operands
auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto pad0 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto stride = model->addOperand(&type3);
- auto channelMultiplier = model->addOperand(&type3);
- auto op4 = model->addOperand(&type2);
+ auto op2 = model->addOperand(&type0);
+ auto op3 = model->addOperand(&type1);
+ auto pad0 = model->addOperand(&type2);
+ auto act = model->addOperand(&type2);
+ auto stride = model->addOperand(&type2);
+ auto channelMultiplier = model->addOperand(&type2);
+ auto op4 = model->addOperand(&type3);
// Phase 2, operations
+ static float op2_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 8);
+ static float op3_init[] = {100.0f, 200.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 2);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
index 740f500..521eb3f 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2.model.cpp
@@ -1,19 +1,23 @@
// Generated file (from: depthwise_conv2d_float_large_2.mod.py). Do not edit
void CreateModel(Model *model) {
- OperandType type3(Type::INT32, {});
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
- OperandType type2(Type::TENSOR_FLOAT32, {4});
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+ OperandType type1(Type::TENSOR_FLOAT32, {4});
// Phase 1, operands
auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto pad0 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto stride = model->addOperand(&type3);
- auto channelMultiplier = model->addOperand(&type3);
- auto op4 = model->addOperand(&type2);
+ auto op2 = model->addOperand(&type0);
+ auto op3 = model->addOperand(&type1);
+ auto pad0 = model->addOperand(&type2);
+ auto act = model->addOperand(&type2);
+ auto stride = model->addOperand(&type2);
+ auto channelMultiplier = model->addOperand(&type2);
+ auto op4 = model->addOperand(&type3);
// Phase 2, operations
+ static float op2_init[] = {0.25f, 0.0f, 10.0f, 100.0f, 0.25f, 1.0f, 20.0f, 100.0f, 0.25f, 0.0f, 30.0f, 100.0f, 0.25f, 1.0f, 40.0f, 100.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 16);
+ static float op3_init[] = {600000.0f, 700000.0f, 800000.0f, 900000.0f};
+ model->setOperandValue(op3, op3_init, sizeof(float) * 4);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..19de705
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp
@@ -0,0 +1,37 @@
+// Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 4});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+ OperandType type2(Type::TENSOR_FLOAT32, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto channelMultiplier = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ static int32_t channelMultiplier_init[] = {1};
+ model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..80bf5b1
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,37 @@
+// Generated file (from: depthwise_conv2d_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto channelMultiplier = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ static int32_t channelMultiplier_init[] = {1};
+ model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d45063c
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
+ OperandType type2(Type::TENSOR_FLOAT32, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto pad0 = model->addOperand(&type3);
+ auto act = model->addOperand(&type3);
+ auto stride = model->addOperand(&type3);
+ auto channelMultiplier = model->addOperand(&type3);
+ auto op4 = model->addOperand(&type1);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ static int32_t channelMultiplier_init[] = {2};
+ model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
index 7f2b4fc..5a6932f 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8.model.cpp
@@ -3,7 +3,7 @@
OperandType type2(Type::INT32, {});
OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
- OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.f, 0);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
// Phase 1, operands
auto op1 = model->addOperand(&type0);
auto op2 = model->addOperand(&type0);
@@ -14,6 +14,10 @@
auto channelMultiplier = model->addOperand(&type2);
auto op4 = model->addOperand(&type3);
// Phase 2, operations
+ static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
+ static int32_t op3_init[] = {0, 0};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
index 4060e4c..dbabdf5 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large.model.cpp
@@ -2,8 +2,8 @@
void CreateModel(Model *model) {
OperandType type2(Type::INT32, {});
OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
- OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.f, 0);
// Phase 1, operands
auto op1 = model->addOperand(&type0);
auto op2 = model->addOperand(&type0);
@@ -14,6 +14,10 @@
auto channelMultiplier = model->addOperand(&type2);
auto op4 = model->addOperand(&type3);
// Phase 2, operations
+ static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
+ static int32_t op3_init[] = {0, 0};
+ model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
static int32_t pad0_init[] = {0};
model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
@@ -25,7 +29,7 @@
model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, op3},
+ {op1},
{op4});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..b965949
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ auto op3 = model->addOperand(&type1);
+ auto pad0 = model->addOperand(&type2);
+ auto act = model->addOperand(&type2);
+ auto stride = model->addOperand(&type2);
+ auto channelMultiplier = model->addOperand(&type2);
+ auto op4 = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ static int32_t channelMultiplier_init[] = {1};
+ model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..959fed1
--- /dev/null
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,36 @@
+// Generated file (from: depthwise_conv2d_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ auto op3 = model->addOperand(&type1);
+ auto pad0 = model->addOperand(&type2);
+ auto act = model->addOperand(&type2);
+ auto stride = model->addOperand(&type2);
+ auto channelMultiplier = model->addOperand(&type2);
+ auto op4 = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t pad0_init[] = {0};
+ model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ static int32_t stride_init[] = {1};
+ model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
+ static int32_t channelMultiplier_init[] = {1};
+ model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, op3},
+ {op4});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_float.model.cpp b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
index bae1fcd..598c4cf 100644
--- a/nn/runtime/test/generated/models/fully_connected_float.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_float.model.cpp
@@ -11,12 +11,16 @@
auto op3 = model->addOperand(&type0);
auto act = model->addOperand(&type3);
// Phase 2, operations
+ static float op2_init[] = {2.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 1);
+ static float b0_init[] = {4.0f};
+ model->setOperandValue(b0, b0_init, sizeof(float) * 1);
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, b0},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
index ab0ff4f..cb17f7b 100644
--- a/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_float_large.model.cpp
@@ -11,12 +11,16 @@
auto op3 = model->addOperand(&type2);
auto act = model->addOperand(&type3);
// Phase 2, operations
+ static float op2_init[] = {2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 5);
+ static float b0_init[] = {900000.0f};
+ model->setOperandValue(b0, b0_init, sizeof(float) * 1);
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, b0},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..2b1f586
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float_large_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_float_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 5});
+ OperandType type1(Type::TENSOR_FLOAT32, {1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ auto b0 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto act = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, b0},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..745eafa
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_float_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 1});
+ OperandType type2(Type::TENSOR_FLOAT32, {1});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto b0 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type0);
+ auto act = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, b0},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
index 6311c1b..cdb9119 100644
--- a/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_quant8.model.cpp
@@ -12,12 +12,16 @@
auto op3 = model->addOperand(&type3);
auto act = model->addOperand(&type4);
// Phase 2, operations
+ static uint8_t op2_init[] = {2};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 1);
+ static int32_t b0_init[] = {4};
+ model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, b0},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
index ec18a16..761f257 100644
--- a/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_large.model.cpp
@@ -11,12 +11,16 @@
auto op3 = model->addOperand(&type2);
auto act = model->addOperand(&type3);
// Phase 2, operations
+ static uint8_t op2_init[] = {10, 20, 20, 20, 10};
+ model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 5);
+ static int32_t b0_init[] = {10};
+ model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2, b0},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..5d70dc9
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_large_weights_as_inputs.model.cpp
@@ -0,0 +1,27 @@
+// Generated file (from: fully_connected_quant8_large_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {1}, 0.04f, 0);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 1.f, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 5}, 0.2f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ auto b0 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ auto act = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, b0},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..ae0b2e1
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,28 @@
+// Generated file (from: fully_connected_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type4(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {1}, 0.25f, 0);
+ OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 0.5f, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 0.5f, 0);
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 1.f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto b0 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ auto act = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2, b0},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lsh_projection.model.cpp b/nn/runtime/test/generated/models/lsh_projection.model.cpp
index e445abd..d5c1357 100644
--- a/nn/runtime/test/generated/models/lsh_projection.model.cpp
+++ b/nn/runtime/test/generated/models/lsh_projection.model.cpp
@@ -1,10 +1,10 @@
// Generated file (from: lsh_projection.mod.py). Do not edit
void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
OperandType type2(Type::TENSOR_FLOAT32, {3});
OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
- OperandType type3(Type::TENSOR_INT32, {1});
OperandType type1(Type::TENSOR_INT32, {3, 2});
- OperandType type4(Type::TENSOR_INT32, {4, 2});
+ OperandType type4(Type::TENSOR_INT32, {8});
// Phase 1, operands
auto hash = model->addOperand(&type0);
auto lookup = model->addOperand(&type1);
@@ -12,10 +12,14 @@
auto type_param = model->addOperand(&type3);
auto output = model->addOperand(&type4);
// Phase 2, operations
+ static float hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+ model->setOperandValue(hash, hash_init, sizeof(float) * 8);
+ static int32_t type_param_init[] = {2};
+ model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {hash, lookup, weight, type_param},
+ {lookup, weight},
{output});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/lsh_projection_2.model.cpp b/nn/runtime/test/generated/models/lsh_projection_2.model.cpp
new file mode 100644
index 0000000..25e9d8b
--- /dev/null
+++ b/nn/runtime/test/generated/models/lsh_projection_2.model.cpp
@@ -0,0 +1,30 @@
+// Generated file (from: lsh_projection_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type3(Type::INT32, {});
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
+ OperandType type1(Type::TENSOR_INT32, {3, 2});
+ OperandType type4(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto hash = model->addOperand(&type0);
+ auto lookup = model->addOperand(&type1);
+ auto weight = model->addOperand(&type2);
+ auto type_param = model->addOperand(&type3);
+ auto output = model->addOperand(&type4);
+ // Phase 2, operations
+ static float hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+ model->setOperandValue(hash, hash_init, sizeof(float) * 8);
+ static int32_t type_param_init[] = {1};
+ model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {lookup, weight},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..d73645f
--- /dev/null
+++ b/nn/runtime/test/generated/models/lsh_projection_weights_as_inputs.model.cpp
@@ -0,0 +1,26 @@
+// Generated file (from: lsh_projection_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 2});
+ OperandType type3(Type::TENSOR_INT32, {1});
+ OperandType type1(Type::TENSOR_INT32, {3, 2});
+ OperandType type4(Type::TENSOR_INT32, {8});
+ // Phase 1, operands
+ auto hash = model->addOperand(&type0);
+ auto lookup = model->addOperand(&type1);
+ auto weight = model->addOperand(&type2);
+ auto type_param = model->addOperand(&type3);
+ auto output = model->addOperand(&type4);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {hash, lookup, weight, type_param},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/lstm.model.cpp b/nn/runtime/test/generated/models/lstm.model.cpp
index e5baa17..528bc5f 100644
--- a/nn/runtime/test/generated/models/lstm.model.cpp
+++ b/nn/runtime/test/generated/models/lstm.model.cpp
@@ -2,8 +2,8 @@
void CreateModel(Model *model) {
OperandType type5(Type::TENSOR_FLOAT32, {0,0});
OperandType type3(Type::TENSOR_FLOAT32, {0});
+ OperandType type9(Type::TENSOR_FLOAT32, {1, 16});
OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type9(Type::TENSOR_FLOAT32, {1, 4, 4});
OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
OperandType type8(Type::TENSOR_FLOAT32, {1});
OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
@@ -39,11 +39,11 @@
auto cell_state_out = model->addOperand(&type6);
auto output = model->addOperand(&type6);
// Phase 2, operations
- model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+ model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
- {output_state_out, cell_state_out, output, scratch_buffer});
+ {scratch_buffer, output_state_out, cell_state_out, output});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/lstm2.model.cpp b/nn/runtime/test/generated/models/lstm2.model.cpp
index 14d7baf..4286acd 100644
--- a/nn/runtime/test/generated/models/lstm2.model.cpp
+++ b/nn/runtime/test/generated/models/lstm2.model.cpp
@@ -2,8 +2,8 @@
void CreateModel(Model *model) {
OperandType type5(Type::TENSOR_FLOAT32, {0,0});
OperandType type3(Type::TENSOR_FLOAT32, {0});
+ OperandType type9(Type::TENSOR_FLOAT32, {1, 12});
OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type9(Type::TENSOR_FLOAT32, {1, 4, 4});
OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
OperandType type8(Type::TENSOR_FLOAT32, {1});
OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
@@ -39,11 +39,11 @@
auto cell_state_out = model->addOperand(&type6);
auto output = model->addOperand(&type6);
// Phase 2, operations
- model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+ model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
- {output_state_out, cell_state_out, output, scratch_buffer});
+ {scratch_buffer, output_state_out, cell_state_out, output});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/lstm3.model.cpp b/nn/runtime/test/generated/models/lstm3.model.cpp
index 16f0662..64b0056 100644
--- a/nn/runtime/test/generated/models/lstm3.model.cpp
+++ b/nn/runtime/test/generated/models/lstm3.model.cpp
@@ -4,9 +4,9 @@
OperandType type4(Type::TENSOR_FLOAT32, {16,20});
OperandType type9(Type::TENSOR_FLOAT32, {1});
OperandType type6(Type::TENSOR_FLOAT32, {2, 16});
- OperandType type10(Type::TENSOR_FLOAT32, {2, 20, 4});
OperandType type7(Type::TENSOR_FLOAT32, {2, 20});
OperandType type0(Type::TENSOR_FLOAT32, {2, 5});
+ OperandType type10(Type::TENSOR_FLOAT32, {2, 80});
OperandType type2(Type::TENSOR_FLOAT32, {20, 16});
OperandType type1(Type::TENSOR_FLOAT32, {20, 5});
OperandType type3(Type::TENSOR_FLOAT32, {20});
@@ -40,11 +40,11 @@
auto cell_state_out = model->addOperand(&type7);
auto output = model->addOperand(&type6);
// Phase 2, operations
- model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {output_state_out, cell_state_out, output, scratch_buffer});
+ model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
- {output_state_out, cell_state_out, output, scratch_buffer});
+ {scratch_buffer, output_state_out, cell_state_out, output});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/reshape.model.cpp b/nn/runtime/test/generated/models/reshape.model.cpp
index ec856e2..e7efea3 100644
--- a/nn/runtime/test/generated/models/reshape.model.cpp
+++ b/nn/runtime/test/generated/models/reshape.model.cpp
@@ -8,10 +8,12 @@
auto op2 = model->addOperand(&type1);
auto op3 = model->addOperand(&type2);
// Phase 2, operations
+ static int32_t op2_init[] = {-1};
+ model->setOperandValue(op2, op2_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/reshape_quant8.model.cpp b/nn/runtime/test/generated/models/reshape_quant8.model.cpp
index afaf5bb..cb89320 100644
--- a/nn/runtime/test/generated/models/reshape_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/reshape_quant8.model.cpp
@@ -8,10 +8,12 @@
auto op2 = model->addOperand(&type1);
auto op3 = model->addOperand(&type2);
// Phase 2, operations
+ static int32_t op2_init[] = {-1};
+ model->setOperandValue(op2, op2_init, sizeof(int32_t) * 1);
model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
- {op1, op2},
+ {op1},
{op3});
assert(model->isValid());
}
diff --git a/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..9f6dafd
--- /dev/null
+++ b/nn/runtime/test/generated/models/reshape_quant8_weights_as_inputs.model.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_quant8_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::TENSOR_INT32, {1});
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3, 3}, 1.f, 0);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {9}, 1.f, 0);
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp b/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp
new file mode 100644
index 0000000..cf4396a
--- /dev/null
+++ b/nn/runtime/test/generated/models/reshape_weights_as_inputs.model.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: reshape_weights_as_inputs.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
+ OperandType type2(Type::TENSOR_FLOAT32, {9});
+ OperandType type1(Type::TENSOR_INT32, {1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
index 8f0dbad..92d080b 100644
--- a/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_1.model.cpp
@@ -1,7 +1,7 @@
// Generated file (from: space_to_depth_quant8_1.mod.py). Do not edit
void CreateModel(Model *model) {
OperandType type1(Type::INT32, {});
- OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8});
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.5f, 0);
OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
// Phase 1, operands
auto input = model->addOperand(&type0);
diff --git a/nn/runtime/test/generated/models/svdf.model.cpp b/nn/runtime/test/generated/models/svdf.model.cpp
index 41533c2..54b1410 100644
--- a/nn/runtime/test/generated/models/svdf.model.cpp
+++ b/nn/runtime/test/generated/models/svdf.model.cpp
@@ -1,7 +1,7 @@
// Generated file (from: svdf.mod.py). Do not edit
void CreateModel(Model *model) {
- OperandType type4(Type::TENSOR_FLOAT32, {2, 36});
OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
OperandType type6(Type::TENSOR_FLOAT32, {2, 4});
OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
@@ -18,11 +18,11 @@
auto state_out = model->addOperand(&type4);
auto output = model->addOperand(&type6);
// Phase 2, operations
- model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {output, state_out});
+ model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input, weights_feature, weights_time, bias, state_in, rank_param, activation_param},
- {output, state_out});
+ {state_out, output});
assert(model->isValid());
}