Improve validation of the HAL structures.

Add extensive validation of the structures passed through the HAL.
Particularly important are the checks that operands & arguments
don't reach outside of their memory blocks.

Also picks up a few missing generated tests.

Bug: 67828197
Test: System tests & VTS tests.

Change-Id: I2edf6219fc660fab7c5b6a73e7a9cb8a358fb29b
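
A note on the main hazard these checks guard against: offset and length
are uint32_t in the HAL, so a sum computed in 32 bits can wrap around
and slip past a naive bounds check. The new MemoryAccessVerifier
therefore widens to size_t before adding. A minimal standalone sketch of
the idea (simplified types, not the exact HAL structures):

    #include <cstddef>
    #include <cstdint>

    // Returns true if [offset, offset + length) lies within a pool of
    // poolSize bytes. Doing the addition in size_t keeps a large
    // offset/length pair from wrapping around uint32_t and passing.
    bool fitsInPool(uint32_t offset, uint32_t length, size_t poolSize) {
        return static_cast<size_t>(offset) + length <= poolSize;
    }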
diff --git a/nn/common/Android.bp b/nn/common/Android.bp
index a499104..ef3a75d 100644
--- a/nn/common/Android.bp
+++ b/nn/common/Android.bp
@@ -31,6 +31,7 @@
         "CpuExecutor.cpp",
         "OperationsUtils.cpp",
         "Utils.cpp",
+        "ValidateHal.cpp",
         "operations/Activation.cpp",
         "operations/Concatenation.cpp",
         "operations/Conv2D.cpp",
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 2456267..75a70d1 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -78,26 +78,6 @@
     }
 }
 
-#define COUNT(X) (sizeof(X) / sizeof(X[0]))
-
-const char* kTypeNames[kNumberOfDataTypes] = {
-        "FLOAT32",        "INT32",        "UINT32",
-        "TENSOR_FLOAT32", "TENSOR_INT32", "TENSOR_QUANT8_ASYMM",
-};
-
-static_assert(COUNT(kTypeNames) == kNumberOfDataTypes, "kTypeNames is incorrect");
-
-const char* kTypeNamesOEM[kNumberOfDataTypesOEM] = {
-        "OEM",            "TENSOR_OEM_BYTE",
-};
-
-static_assert(COUNT(kTypeNamesOEM) == kNumberOfDataTypesOEM, "kTypeNamesOEM is incorrect");
-
-// TODO Check if this useful
-const char* kErrorNames[] = {
-        "NO_ERROR", "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
-};
-
 namespace {
 
 template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
@@ -116,6 +96,31 @@
 
 };  // anonymous namespace
 
+#define COUNT(X) (sizeof(X) / sizeof(X[0]))
+
+const char* kTypeNames[kNumberOfDataTypes] = {
+        "FLOAT32",        "INT32",        "UINT32",
+        "TENSOR_FLOAT32", "TENSOR_INT32", "TENSOR_QUANT8_ASYMM",
+};
+
+static_assert(COUNT(kTypeNames) == kNumberOfDataTypes, "kTypeNames is incorrect");
+
+const char* kTypeNamesOEM[kNumberOfDataTypesOEM] = {
+        "OEM",            "TENSOR_OEM_BYTE",
+};
+
+static_assert(COUNT(kTypeNamesOEM) == kNumberOfDataTypesOEM, "kTypeNamesOEM is incorrect");
+
+const char* getOperandTypeName(OperandType type) {
+    uint32_t n = static_cast<uint32_t>(type);
+    return tableLookup(kTypeNames, kTypeNamesOEM, n);
+}
+
+// TODO Check if this is useful.
+const char* kErrorNames[] = {
+        "NO_ERROR", "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
+};
+
 const char* kOperationNames[kNumberOfOperationTypes] = {
         "ADD",
         "AVERAGE_POOL",
@@ -296,154 +301,6 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-static bool validOperandIndexes(const hidl_vec<uint32_t> indexes, size_t operandCount) {
-    for (uint32_t i : indexes) {
-        if (i >= operandCount) {
-            LOG(ERROR) << "Index out of range " << i << "/" << operandCount;
-            return false;
-        }
-    }
-    return true;
-}
-
-static bool validOperands(const hidl_vec<Operand>& operands, const hidl_vec<uint8_t>& operandValues,
-                          size_t poolCount) {
-    for (auto& operand : operands) {
-        if (!validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM,
-                       static_cast<uint32_t>(operand.type))) {
-            LOG(ERROR) << "Invalid operand type " << toString(operand.type);
-            return false;
-        }
-        /* TODO validate dim with type
-        if (!validOperandIndexes(operand.dimensions, mDimensions)) {
-            return false;
-        }
-        */
-        switch (operand.lifetime) {
-            case OperandLifeTime::CONSTANT_COPY:
-                if (operand.location.offset + operand.location.length > operandValues.size()) {
-                    LOG(ERROR) << "OperandValue location out of range.  Starts at "
-                               << operand.location.offset << ", length " << operand.location.length
-                           << ", max " << operandValues.size();
-                    return false;
-                }
-                break;
-            case OperandLifeTime::TEMPORARY_VARIABLE:
-            case OperandLifeTime::MODEL_INPUT:
-            case OperandLifeTime::MODEL_OUTPUT:
-            case OperandLifeTime::NO_VALUE:
-                if (operand.location.offset != 0 || operand.location.length != 0) {
-                    LOG(ERROR) << "Unexpected offset " << operand.location.offset << " or length "
-                               << operand.location.length << " for runtime location.";
-                    return false;
-                }
-                break;
-            case OperandLifeTime::CONSTANT_REFERENCE:
-                if (operand.location.poolIndex >= poolCount) {
-                    LOG(ERROR) << "Invalid poolIndex " << operand.location.poolIndex << "/"
-                               << poolCount;
-                    return false;
-                }
-                break;
-            // TODO: Validate that we are within the pool.
-            default:
-                LOG(ERROR) << "Invalid lifetime";
-                return false;
-        }
-    }
-    return true;
-}
-
-static bool validOperations(const hidl_vec<Operation>& operations, size_t operandCount) {
-    for (auto& op : operations) {
-        if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM,
-                       static_cast<uint32_t>(op.type))) {
-            LOG(ERROR) << "Invalid operation type " << toString(op.type);
-            return false;
-        }
-        if (!validOperandIndexes(op.inputs, operandCount) ||
-            !validOperandIndexes(op.outputs, operandCount)) {
-            return false;
-        }
-    }
-    return true;
-}
-
-// TODO doublecheck
-bool validateModel(const Model& model) {
-    const size_t operandCount = model.operands.size();
-    return (validOperands(model.operands, model.operandValues, model.pools.size()) &&
-            validOperations(model.operations, operandCount) &&
-            validOperandIndexes(model.inputIndexes, operandCount) &&
-            validOperandIndexes(model.outputIndexes, operandCount));
-}
-
-bool validRequestArguments(const hidl_vec<RequestArgument>& arguments,
-                           const hidl_vec<uint32_t>& operandIndexes,
-                           const hidl_vec<Operand>& operands, size_t poolCount,
-                           const char* type) {
-    const size_t argumentCount = arguments.size();
-    if (argumentCount != operandIndexes.size()) {
-        LOG(ERROR) << "Request specifies " << argumentCount << " " << type << "s but the model has "
-                   << operandIndexes.size();
-        return false;
-    }
-    for (size_t argumentIndex = 0; argumentIndex < argumentCount; argumentIndex++) {
-        const RequestArgument& argument = arguments[argumentIndex];
-        const uint32_t operandIndex = operandIndexes[argumentIndex];
-        const Operand& operand = operands[operandIndex];
-        if (argument.hasNoValue) {
-            if (argument.location.poolIndex != 0 ||
-                argument.location.offset != 0 ||
-                argument.location.length != 0 ||
-                argument.dimensions.size() != 0) {
-                LOG(ERROR) << "Request " << type << " " << argumentIndex
-                           << " has no value yet has details.";
-                return false;
-            }
-        }
-        if (argument.location.poolIndex >= poolCount) {
-            LOG(ERROR) << "Request " << type << " " << argumentIndex << " has an invalid poolIndex "
-                       << argument.location.poolIndex << "/" << poolCount;
-            return false;
-        }
-        // TODO: Validate that we are within the pool.
-        uint32_t rank = argument.dimensions.size();
-        if (rank > 0) {
-            if (rank != operand.dimensions.size()) {
-                LOG(ERROR) << "Request " << type << " " << argumentIndex
-                           << " has number of dimensions (" << rank
-                           << ") different than the model's (" << operand.dimensions.size() << ")";
-                return false;
-            }
-            for (size_t i = 0; i < rank; i++) {
-                if (argument.dimensions[i] != operand.dimensions[i] &&
-                    operand.dimensions[i] != 0) {
-                    LOG(ERROR) << "Request " << type << " " << argumentIndex
-                               << " has dimension " << i << " of " << operand.dimensions[i]
-                               << " different than the model's " << operand.dimensions[i];
-                    return false;
-                }
-                if (argument.dimensions[i] == 0) {
-                    LOG(ERROR) << "Request " << type << " " << argumentIndex
-                               << " has dimension " << i << " of zero";
-                    return false;
-                }
-            }
-        }
-    }
-    return true;
-}
-
-// TODO doublecheck
-bool validateRequest(const Request& request, const Model& model) {
-    const size_t poolCount = request.pools.size();
-    return (validRequestArguments(request.inputs, model.inputIndexes, model.operands, poolCount,
-                                  "input") &&
-            validRequestArguments(request.outputs, model.outputIndexes, model.operands, poolCount,
-                                  "output"));
-}
-
 #ifdef NN_DEBUGGABLE
 uint32_t getProp(const char* str, uint32_t defaultValue) {
     const std::string propStr = android::base::GetProperty(str, "");
diff --git a/nn/common/ValidateHal.cpp b/nn/common/ValidateHal.cpp
new file mode 100644
index 0000000..011bc3c
--- /dev/null
+++ b/nn/common/ValidateHal.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ValidateHal"
+
+#include "ValidateHal.h"
+#include "NeuralNetworks.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+
+namespace android {
+namespace nn {
+
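+// Precomputes the sizes of the memory pools so that operand and request
+// argument locations can be cheaply checked against their pool's bounds.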
+class MemoryAccessVerifier {
+public:
+    MemoryAccessVerifier(const hidl_vec<hidl_memory>& pools)
+        : mPoolCount(pools.size()), mPoolSizes(mPoolCount) {
+        for (size_t i = 0; i < mPoolCount; i++) {
+            mPoolSizes[i] = pools[i].size();
+        }
+    }
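+    // Returns true if the location references a valid pool and lies entirely within it.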
+    bool validate(const DataLocation& location) {
+        if (location.poolIndex >= mPoolCount) {
+            LOG(ERROR) << "Invalid poolIndex " << location.poolIndex << "/" << mPoolCount;
+            return false;
+        }
+        const size_t size = mPoolSizes[location.poolIndex];
+        // Do the addition using size_t to avoid potential wrap-around problems.
+        if (static_cast<size_t>(location.offset) + location.length > size) {
+            LOG(ERROR) << "Reference to pool " << location.poolIndex << " with offset "
+                       << location.offset << " and length " << location.length
+                       << " exceeds pool size of " << size;
+            return false;
+        }
+        return true;
+    }
+
+private:
+    size_t mPoolCount;
+    std::vector<size_t> mPoolSizes;
+};
+
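+// Validates each operand's type, dimensions, scale, zeroPoint, lifetime, and location.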
+static bool validateOperands(const hidl_vec<Operand>& operands,
+                             const hidl_vec<uint8_t>& operandValues,
+                             const hidl_vec<hidl_memory>& pools) {
+    uint32_t index = 0;
+    MemoryAccessVerifier poolVerifier(pools);
+    for (auto& operand : operands) {
+        // Validate type and dimensions.
+        switch (operand.type) {
+            case OperandType::FLOAT32:
+            case OperandType::INT32:
+            case OperandType::UINT32:
+            case OperandType::OEM: {
+                size_t count = operand.dimensions.size();
+                if (count != 0) {
+                    LOG(ERROR) << "Operand " << index << ": Scalar data has dimensions of rank "
+                               << count;
+                    return false;
+                }
+                break;
+            }
+            case OperandType::TENSOR_FLOAT32:
+            case OperandType::TENSOR_INT32:
+            case OperandType::TENSOR_QUANT8_ASYMM:
+            case OperandType::TENSOR_OEM_BYTE: {
+                if (operand.dimensions.size() == 0) {
+                    LOG(ERROR) << "Operand " << index << ": Tensor has dimensions of rank 0";
+                    return false;
+                }
+                break;
+            }
+            default:
+                LOG(ERROR) << "Operand " << index << ": Invalid operand type "
+                           << toString(operand.type);
+                return false;
+        }
+
+        // TODO Validate the numberOfConsumers.
+        // TODO Since we have to validate this field anyway, there was little point in
+        // including it in the HAL. For the next release, consider removing it unless an
+        // additional process in system space creates the value; then it would not need validation.
+
+        // Validate the scale.
+        switch (operand.type) {
+            case OperandType::FLOAT32:
+            case OperandType::INT32:
+            case OperandType::UINT32:
+            case OperandType::TENSOR_FLOAT32:
+                if (operand.scale != 0.f) {
+                    LOG(ERROR) << "Operand " << index << ": Operand of type "
+                               << getOperandTypeName(operand.type) << " with a non-zero scale ("
+                               << operand.scale << ")";
+                    return false;
+                }
+                break;
+            case OperandType::TENSOR_QUANT8_ASYMM:
+                if (operand.scale == 0.f) {
+                    LOG(ERROR) << "Operand " << index << ": Operand of type "
+                               << getOperandTypeName(operand.type) << " with a zero scale";
+                    return false;
+                }
+                break;
+            default:
+                // No validation for the OEM types. No validation also for TENSOR_INT32,
+                // as tensors of this type may be used with or without scale, depending on
+                // the operation.
+                // TODO We should have had separate types for TENSOR_INT32 tensors that have
+                // a scale and those that don't.  Document now and fix in the next release.
+                break;
+        }
+
+        // Validate the zeroPoint.
+        switch (operand.type) {
+            case OperandType::FLOAT32:
+            case OperandType::INT32:
+            case OperandType::UINT32:
+            case OperandType::TENSOR_FLOAT32:
+            case OperandType::TENSOR_INT32:
+                if (operand.zeroPoint != 0) {
+                    LOG(ERROR) << "Operand " << index << ": Operand of type "
+                               << getOperandTypeName(operand.type) << " with a non-zero zeroPoint "
+                               << operand.zeroPoint;
+                    return false;
+                }
+                break;
+            default:
+                // No validation for the OEM types.
+                break;
+        }
+
+        // Validate the lifetime and the location.
+        const DataLocation& location = operand.location;
+        switch (operand.lifetime) {
+            case OperandLifeTime::CONSTANT_COPY:
+                if (location.poolIndex != 0) {
+                    LOG(ERROR) << "Operand " << index
+                               << ": CONSTANT_COPY with a non-zero poolIndex "
+                               << location.poolIndex;
+                    return false;
+                }
+                // Do the addition using size_t to avoid potential wrap-around problems.
+                if (static_cast<size_t>(location.offset) + location.length > operandValues.size()) {
+                    LOG(ERROR) << "Operand " << index
+                               << ": OperandValue location out of range.  Starts at "
+                               << location.offset << ", length " << location.length << ", max "
+                               << operandValues.size();
+                    return false;
+                }
+                break;
+            case OperandLifeTime::CONSTANT_REFERENCE:
+                if (!poolVerifier.validate(location)) {
+                    return false;
+                }
+                break;
+            case OperandLifeTime::TEMPORARY_VARIABLE:
+            case OperandLifeTime::MODEL_INPUT:
+            case OperandLifeTime::MODEL_OUTPUT:
+            case OperandLifeTime::NO_VALUE:
+                if (location.poolIndex != 0 || location.offset != 0 || location.length != 0) {
+                    LOG(ERROR) << "Operand " << index << ": Unexpected poolIndex "
+                               << location.poolIndex << ", offset " << location.offset
+                               << ", or length " << location.length << " for operand of lifetime "
+                               << toString(operand.lifetime);
+                    return false;
+                }
+                break;
+            default:
+                LOG(ERROR) << "Operand " << index << ": Invalid lifetime "
+                           << toString(operand.lifetime);
+                return false;
+        }
+
+        // For constants, validate that the length is as expected. The other lifetimes
+        // expect the length to be 0. Don't validate for OEM types.
+        if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
+            operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
+            if (operand.type != OperandType::OEM &&
+                operand.type != OperandType::TENSOR_OEM_BYTE) {
+                uint32_t expectedLength = sizeOfData(operand.type, operand.dimensions);
+                if (location.length != expectedLength) {
+                    LOG(ERROR) << "Operand " << index << ": For operand " << toString(operand)
+                               << " expected a size of " << expectedLength << " but got "
+                               << location.length;
+                    return false;
+                }
+            }
+        }
+
+        index++;
+    }
+    return true;
+}
+
+static bool validateOperations(const hidl_vec<Operation>& operations,
+                               const hidl_vec<Operand>& operands) {
+    const size_t operandCount = operands.size();
+    // This vector keeps track of whether there's an operation that writes to
+    // each operand. It is used to validate that temporary variables and
+    // model outputs will be written to.
+    std::vector<bool> writtenTo(operandCount, false);
+    for (auto& op : operations) {
+        if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM,
+                       static_cast<uint32_t>(op.type))) {
+            LOG(ERROR) << "Invalid operation type " << toString(op.type);
+            return false;
+        }
+        // TODO Validate that the number of inputs and outputs, and their types, is correct
+        // for the operation. This is currently done in CpuExecutor but should be done
+        // here for all drivers.
+        for (uint32_t i : op.inputs) {
+            if (i >= operandCount) {
+                LOG(ERROR) << "Operation input index out of range " << i << "/" << operandCount;
+                return false;
+            }
+        }
+        for (uint32_t i : op.outputs) {
+            if (i >= operandCount) {
+                LOG(ERROR) << "Operation output index out of range " << i << "/" << operandCount;
+                return false;
+            }
+            const Operand& operand = operands[i];
+            if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE &&
+                operand.lifetime != OperandLifeTime::MODEL_OUTPUT) {
+                LOG(ERROR) << "Writing to an operand with incompatible lifetime "
+                           << toString(operand.lifetime);
+                return false;
+            }
+
+            // Check that we only write once to an operand.
+            if (writtenTo[i]) {
+                LOG(ERROR) << "Operand " << i << " written a second time";
+                return false;
+            }
+            writtenTo[i] = true;
+        }
+    }
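+    // Verify that every temporary variable and every model output is
+    // written by some operation.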
+    for (size_t i = 0; i < operandCount; i++) {
+        if (!writtenTo[i]) {
+            const Operand& operand = operands[i];
+            if (operand.lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+                operand.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+                LOG(ERROR) << "Operand " << i << " with lifetime " << toString(operand.lifetime)
+                           << " is not being written to.";
+                return false;
+            }
+        }
+    }
+    // TODO More whole-graph verifications are possible, for example that an
+    // operand is not used as both input & output of the same op, and more
+    // generally that the graph is acyclic.
+    return true;
+}
+
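+// Validates that each memory pool is of a supported type (ashmem or
+// mmap_fd) and has a non-null handle.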
+static bool validatePools(const hidl_vec<hidl_memory>& pools) {
+    for (const hidl_memory& memory : pools) {
+        const auto name = memory.name();
+        if (name != "ashmem" && name != "mmap_fd") {
+            LOG(ERROR) << "Unsupported memory type " << name;
+            return false;
+        }
+        if (memory.handle() == nullptr) {
+            LOG(ERROR) << "Memory of type " << name << " is null";
+            return false;
+        }
+    }
+    return true;
+}
+
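+// Validates that each model input or output index is within range and that
+// the operand it references has the expected lifetime.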
+static bool validateModelInputOutputs(const hidl_vec<uint32_t> indexes,
+                                      const hidl_vec<Operand>& operands, OperandLifeTime lifetime) {
+    const size_t operandCount = operands.size();
+    for (uint32_t i : indexes) {
+        if (i >= operandCount) {
+            LOG(ERROR) << "Model input or output index out of range " << i << "/" << operandCount;
+            return false;
+        }
+        const Operand& operand = operands[i];
+        if (operand.lifetime != lifetime) {
+            LOG(ERROR) << "Model input or output has lifetime of " << toString(operand.lifetime)
+                       << " instead of the expected " << toString(lifetime);
+            return false;
+        }
+    }
+    return true;
+}
+
+bool validateModel(const Model& model) {
+    return (validateOperands(model.operands, model.operandValues, model.pools) &&
+            validateOperations(model.operations, model.operands) &&
+            validateModelInputOutputs(model.inputIndexes, model.operands,
+                                      OperandLifeTime::MODEL_INPUT) &&
+            validateModelInputOutputs(model.outputIndexes, model.operands,
+                                      OperandLifeTime::MODEL_OUTPUT) &&
+            validatePools(model.pools));
+}
+
+// Validates the arguments of a request. type is either "input" or "output" and is used
+// for printing error messages. operandIndexes is the array of input or output operand
+// indexes that was passed to ANeuralNetworksModel_identifyInputsAndOutputs.
+static bool validateRequestArguments(const hidl_vec<RequestArgument>& requestArguments,
+                                     const hidl_vec<uint32_t>& operandIndexes,
+                                     const hidl_vec<Operand>& operands,
+                                     const hidl_vec<hidl_memory>& pools, const char* type) {
+    MemoryAccessVerifier poolVerifier(pools);
+    // The request should specify as many arguments as were described in the model.
+    const size_t requestArgumentCount = requestArguments.size();
+    if (requestArgumentCount != operandIndexes.size()) {
+        LOG(ERROR) << "Request specifies " << requestArgumentCount << " " << type
+                   << "s but the model has " << operandIndexes.size();
+        return false;
+    }
+    for (size_t requestArgumentIndex = 0; requestArgumentIndex < requestArgumentCount;
+         requestArgumentIndex++) {
+        const RequestArgument& requestArgument = requestArguments[requestArgumentIndex];
+        const DataLocation& location = requestArgument.location;
+        // Get the operand index for this argument. We extract it from the list
+        // that was provided in the call to ANeuralNetworksModel_identifyInputsAndOutputs.
+        // We assume in this function that the model has been validated already.
+        const uint32_t operandIndex = operandIndexes[requestArgumentIndex];
+        const Operand& operand = operands[operandIndex];
+        if (requestArgument.hasNoValue) {
+            if (location.poolIndex != 0 || location.offset != 0 || location.length != 0 ||
+                requestArgument.dimensions.size() != 0) {
+                LOG(ERROR) << "Request " << type << " " << requestArgumentIndex
+                           << " has no value yet has details.";
+                return false;
+            }
+        } else {
+            // Validate the location.
+            if (!poolVerifier.validate(location)) {
+                return false;
+            }
+            // If the argument specifies dimensions, validate them.
+            uint32_t rank = requestArgument.dimensions.size();
+            if (rank == 0) {
+                // Validate that all the dimensions are specified in the model.
+                for (size_t i = 0; i < operand.dimensions.size(); i++) {
+                    if (operand.dimensions[i] == 0) {
+                        LOG(ERROR) << "Model has dimension " << i
+                                   << " set to 0 but the request does specify the dimension.";
+                        return false;
+                    }
+                }
+            } else {
+                if (rank != operand.dimensions.size()) {
+                    LOG(ERROR) << "Request " << type << " " << requestArgumentIndex
+                               << " has number of dimensions (" << rank
+                               << ") different than the model's (" << operand.dimensions.size()
+                               << ")";
+                    return false;
+                }
+                for (size_t i = 0; i < rank; i++) {
+                    if (requestArgument.dimensions[i] != operand.dimensions[i] &&
+                        operand.dimensions[i] != 0) {
+                        LOG(ERROR) << "Request " << type << " " << requestArgumentIndex
+                                   << " has dimension " << i << " of "
+                                   << requestArgument.dimensions[i]
+                                   << " different than the model's " << operand.dimensions[i];
+                        return false;
+                    }
+                    if (requestArgument.dimensions[i] == 0) {
+                        LOG(ERROR) << "Request " << type << " " << requestArgumentIndex
+                                   << " has dimension " << i << " of zero";
+                        return false;
+                    }
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool validateRequest(const Request& request, const Model& model) {
+    return (validateRequestArguments(request.inputs, model.inputIndexes, model.operands,
+                                     request.pools, "input") &&
+            validateRequestArguments(request.outputs, model.outputIndexes, model.operands,
+                                     request.pools, "output") &&
+            validatePools(request.pools));
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index 3eebf26..640184e 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -89,9 +89,12 @@
     return sizeOfData(operand.type, operand.dimensions);
 }
 
-// Returns the name of the operation in ASCII.
+// Returns the name of the operation type in ASCII.
 const char* getOperationName(OperationType opCode);
 
+// Returns the name of the operand type in ASCII.
+const char* getOperandTypeName(OperandType type);
+
 // Memory is unmapped.
 // Memory is reference counted by hidl_memory instances, and is deallocated
 // once there are no more references.
@@ -143,8 +146,6 @@
 int validateOperandType(const ANeuralNetworksOperandType& type, const char* tag, bool allowPartial);
 int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                         const char* tag);
-bool validateModel(const Model& model);
-bool validateRequest(const Request& request, const Model& model);
 
 inline size_t getSizeFromInts(int lower, int higher) {
     return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
diff --git a/nn/common/include/ValidateHal.h b/nn/common/include/ValidateHal.h
new file mode 100644
index 0000000..3f1a875
--- /dev/null
+++ b/nn/common/include/ValidateHal.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_COMMON_VALIDATEHAL_H
+#define ANDROID_ML_NN_COMMON_VALIDATEHAL_H
+
+#include "HalInterfaces.h"
+
+namespace android {
+namespace nn {
+
+// Verifies that the model is valid, i.e. it is consistent, takes
+// only acceptable values, the constants don't extend outside the memory
+// regions they are part of, etc.
+// IMPORTANT: This function cannot validate that OEM operations and operands
+// are correctly defined, as these are specific to each implementation.
+// Each driver should do its own validation of OEM types.
+bool validateModel(const Model& model);
+
+// Verifies that the request for the given model is valid.
+// IMPORTANT: This function cannot validate that OEM operations and operands
+// are correctly defined, as these are specific to each implementation.
+// Each driver should do its own validation of OEM types.
+bool validateRequest(const Request& request, const Model& model);
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_ML_NN_COMMON_VALIDATEHAL_H
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index faeecae..664607f 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -20,6 +20,7 @@
 
 #include "CpuExecutor.h"
 #include "HalInterfaces.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/driver/sample/SampleDriverAll.cpp b/nn/driver/sample/SampleDriverAll.cpp
index 5ddaaec..ee8cc02 100644
--- a/nn/driver/sample/SampleDriverAll.cpp
+++ b/nn/driver/sample/SampleDriverAll.cpp
@@ -20,6 +20,7 @@
 
 #include "HalInterfaces.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/driver/sample/SampleDriverFloatFast.cpp b/nn/driver/sample/SampleDriverFloatFast.cpp
index cf41629..3aec4db 100644
--- a/nn/driver/sample/SampleDriverFloatFast.cpp
+++ b/nn/driver/sample/SampleDriverFloatFast.cpp
@@ -20,6 +20,7 @@
 
 #include "HalInterfaces.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/driver/sample/SampleDriverFloatSlow.cpp b/nn/driver/sample/SampleDriverFloatSlow.cpp
index 87ed399..074f8ca 100644
--- a/nn/driver/sample/SampleDriverFloatSlow.cpp
+++ b/nn/driver/sample/SampleDriverFloatSlow.cpp
@@ -20,6 +20,7 @@
 
 #include "HalInterfaces.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/driver/sample/SampleDriverMinimal.cpp b/nn/driver/sample/SampleDriverMinimal.cpp
index 0b65000..44bf533 100644
--- a/nn/driver/sample/SampleDriverMinimal.cpp
+++ b/nn/driver/sample/SampleDriverMinimal.cpp
@@ -21,6 +21,7 @@
 #include "HalInterfaces.h"
 #include "NeuralNetworksOEM.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/driver/sample/SampleDriverQuant.cpp b/nn/driver/sample/SampleDriverQuant.cpp
index 25b07d2..cf2eb9c 100644
--- a/nn/driver/sample/SampleDriverQuant.cpp
+++ b/nn/driver/sample/SampleDriverQuant.cpp
@@ -20,6 +20,7 @@
 
 #include "HalInterfaces.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <android-base/logging.h>
 #include <hidl/LegacySupport.h>
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index cb19390..de0785f 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -21,6 +21,7 @@
 #include "NeuralNetworks.h"
 #include "NeuralNetworksWrapper.h"
 #include "Utils.h"
+#include "ValidateHal.h"
 
 #include <gtest/gtest.h>
 
diff --git a/nn/runtime/test/TestTrivialModel.cpp b/nn/runtime/test/TestTrivialModel.cpp
index 1eda104..680fe0e 100644
--- a/nn/runtime/test/TestTrivialModel.cpp
+++ b/nn/runtime/test/TestTrivialModel.cpp
@@ -158,7 +158,7 @@
     Model modelBroadcastAdd2;
     // activation: NONE.
     int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
-    OperandType scalarType(Type::INT32, {1});
+    OperandType scalarType(Type::INT32, {});
     auto activation = modelBroadcastAdd2.addOperand(&scalarType);
     modelBroadcastAdd2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
 
@@ -190,7 +190,7 @@
     Model modelBroadcastMul2;
     // activation: NONE.
     int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
-    OperandType scalarType(Type::INT32, {1});
+    OperandType scalarType(Type::INT32, {});
     auto activation = modelBroadcastMul2.addOperand(&scalarType);
     modelBroadcastMul2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
 
diff --git a/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
index f855432..dcabb12 100644
--- a/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/models/depth_to_space_quant8_2.model.cpp
@@ -1,8 +1,8 @@
 // Generated file (from: depth_to_space_quant8_2.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
   auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
index 7bd289d..4e91914 100644
--- a/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_depth_quant8_2.model.cpp
@@ -1,8 +1,8 @@
 // Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4});
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1});
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
   // Phase 1, operands
   auto input = model->addOperand(&type0);
   auto radius = model->addOperand(&type1);
diff --git a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
index 3af6f3d..404c79b 100644
--- a/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depth_to_space_quant8_2.model.cpp
@@ -6,7 +6,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 1,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 4, 4, 1},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
index 6205750..1d7dc58 100644
--- a/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_depth_quant8_2.model.cpp
@@ -6,7 +6,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 4, 4, 1},
             .numberOfConsumers = 1,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24,7 +24,7 @@
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 2, 2, 4},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = 0.5f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
diff --git a/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py b/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
index 6306918..95ea042 100644
--- a/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
+++ b/nn/runtime/test/specs/depth_to_space_quant8_2.mod.py
@@ -1,7 +1,7 @@
 model = Model()
-i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}")
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 0.5f, 0")
 
 model = model.Operation("DEPTH_TO_SPACE", i1, block).To(output)
 
diff --git a/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py b/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
index 13fb8e5..b8f0d5f 100644
--- a/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
+++ b/nn/runtime/test/specs/space_to_depth_quant8_2.mod.py
@@ -1,7 +1,7 @@
 model = Model()
-i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}")
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 0.5f, 0")
 block = Int32Scalar("radius", 2)
-output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}")
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 0.5f, 0")
 
 model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)