Improve validation of the HAL structures.

Add a lot of validation of the structures passed in the HAL.
Particularly important are the checks that operands & arguments
do not try to reach outside of their memory blocks.

Also adds a few missing generated tests.

Bug: 67828197
Test: System tests & VTS tests.

Change-Id: I2edf6219fc660fab7c5b6a73e7a9cb8a358fb29b
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index cb19390..de0785f 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -21,6 +21,7 @@
 #include "NeuralNetworks.h"
 #include "NeuralNetworksWrapper.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <gtest/gtest.h>
 
diff --git a/nn/runtime/test/TestTrivialModel.cpp b/nn/runtime/test/TestTrivialModel.cpp
index 1eda104..680fe0e 100644
--- a/nn/runtime/test/TestTrivialModel.cpp
+++ b/nn/runtime/test/TestTrivialModel.cpp
@@ -158,7 +158,7 @@
     Model modelBroadcastAdd2;
     // activation: NONE.
     int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
-    OperandType scalarType(Type::INT32, {1});
+    OperandType scalarType(Type::INT32, {});
     auto activation = modelBroadcastAdd2.addOperand(&scalarType);
     modelBroadcastAdd2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
 
@@ -190,7 +190,7 @@
     Model modelBroadcastMul2;
     // activation: NONE.
     int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
-    OperandType scalarType(Type::INT32, {1});
+    OperandType scalarType(Type::INT32, {});
     auto activation = modelBroadcastMul2.addOperand(&scalarType);
     modelBroadcastMul2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
 
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
index f855432..dcabb12 100644
--- a/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
@@ -1,8 +1,8 @@
 // Generated file (from: depth_to_space_quant8_2.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
   auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
index 7bd289d..4e91914 100644
--- a/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
@@ -1,8 +1,8 @@
 // Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
   auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
index 3af6f3d..404c79b 100644
--- a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
@@ -6,7 +6,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 1,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 4, 4, 1},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
index 6205750..1d7dc58 100644
--- a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
@@ -6,7 +6,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 4, 4, 1},
             .numberOfConsumers = 1,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py b/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
index 6306918..95ea042 100644
--- a/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
+++ b/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
@@ -1,7 +1,7 @@
 model = Model()
-i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}")
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 0.5f, 0")
 
 model = model.Operation("DEPTH_TO_SPACE", i1, block).To(output)
 
diff --git a/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py b/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
index 13fb8e5..b8f0d5f 100644
--- a/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
+++ b/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
@@ -1,7 +1,7 @@
 model = Model()
-i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}")
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 0.5f, 0")
 
 model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)