Allow fine grain verbose logging control through system property.

  - Existing DEBUG and INFO change to the new mechanism, existing ERROR and
    WARNING unchanged.
  - By default, all verbose logging is disabled.
  - setprop debug.nn.vlog 1 enables all verbose logging.
  - setprop debug.nn.vlog "tag1 tag2 ..." only enables the selected tags.
  - Available tags:
      all: same as 1.
      model: enable logging with tag MODEL.
      compilation: enable logging with tag COMPILATION.
      execution: enable logging with tag EXECUTION.
      cpuexe: enable logging with tag CPUEXE.
      manager: enable logging with tag MANAGER.
      driver: enable logging with tag DRIVER.

Bug: 63905942
Test: mm
Test: NeuralNetworksTests pass
Test: manually set property to allow individual modules to log
      verbosely
Test: tested with debug.nn.partition default -> 0 -> 1 to make
      sure getProp still works fine

Change-Id: Iaa0ffa62176dabcdc35058748597df720fd6e47e
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 9b03d69..b8988b5 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -100,9 +100,9 @@
 // by the caller.
 int CpuExecutor::run(const Model& model, const Request& request,
                      const std::vector<RunTimePoolInfo>& runTimePoolInfos) {
-    LOG(DEBUG) << "CpuExecutor::run()";
-    LOG(DEBUG) << "model: " << toString(model);
-    LOG(DEBUG) << "request: " << toString(request);
+    VLOG(CPUEXE) << "CpuExecutor::run()";
+    // VLOG(CPUEXE) << "model: " << toString(model);
+    VLOG(CPUEXE) << "request: " << toString(request);
 
     mModel = &model;
     mRequest = &request; // TODO check if mRequest is needed
@@ -119,12 +119,12 @@
     }
     mModel = nullptr;
     mRequest = nullptr;
-    LOG(DEBUG) << "Completed run normally";
+    VLOG(CPUEXE) << "Completed run normally";
     return ANEURALNETWORKS_NO_ERROR;
 }
 
 bool CpuExecutor::initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& runTimePoolInfos) {
-    LOG(DEBUG) << "CpuExecutor::initializeRunTimeInfo";
+    VLOG(CPUEXE) << "CpuExecutor::initializeRunTimeInfo";
     const size_t count = mModel->operands.size();
     mOperands.resize(count);
 
@@ -218,7 +218,7 @@
 }
 
 int CpuExecutor::executeOperation(const Operation& operation) {
-    LOG(DEBUG) << "CpuExecutor::executeOperation(" << toString(operation) << ")";
+    // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << toString(operation) << ")";
     const hidl_vec<uint32_t>& ins = operation.inputs;
     const hidl_vec<uint32_t>& outs = operation.outputs;
     bool success = false;
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index c0c9ccf..f73b12c 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -20,13 +20,64 @@
 #include "NeuralNetworks.h"
 
 #include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
 #include <sys/system_properties.h>
+#include <unordered_map>
 
 using ::android::hidl::allocator::V1_0::IAllocator;
 
 namespace android {
 namespace nn {
 
+const char kVLogPropKey[] = "debug.nn.vlog";
+int vLogMask = ~0;
+
+// Split the space separated list of tags from verbose log setting and build the
+// logging mask from it. Note that '1' and 'all' are special cases to enable all
+// verbose logging.
+//
+// NN API verbose logging setting comes from system property debug.nn.vlog.
+// Example:
+// setprop debug.nn.vlog 1 : enable all logging tags.
+// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
+//                                             COMPILATION tags.
+void initVLogMask() {
+    vLogMask = 0;
+    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
+    if (vLogSetting.empty()) {
+        return;
+    }
+
+    std::unordered_map<std::string, int> vLogFlags = {
+        {"1", -1},
+        {"all", -1},
+        {"model", MODEL},
+        {"compilation", COMPILATION},
+        {"execution", EXECUTION},
+        {"cpuexe", CPUEXE},
+        {"manager", MANAGER},
+        {"driver", DRIVER}};
+
+    std::vector<std::string> elements = android::base::Split(vLogSetting, " ");
+    for (const auto& elem : elements) {
+        const auto& flag = vLogFlags.find(elem);
+        if (flag == vLogFlags.end()) {
+            LOG(ERROR) << "Unknown trace flag: " << elem;
+            continue;
+        }
+
+        if (flag->second == -1) {
+            // -1 is used for the special values "1" and "all" that enable all
+            // tracing.
+            vLogMask = ~0;
+            return;
+        } else {
+            vLogMask |= 1 << flag->second;
+        }
+    }
+}
+
 #define COUNT(X) (sizeof(X) / sizeof(X[0]))
 
 const char* kTypeNames[kNumberOfDataTypes] = {
@@ -384,31 +435,14 @@
 }
 
 #ifdef NN_DEBUGGABLE
-
-// Implementation of property_get from libcutils
-static int property_get(const char* key, char* value, const char* default_value) {
-    int len;
-    len = __system_property_get(key, value);
-    if (len > 0) {
-        return len;
-    }
-
-    if (default_value) {
-        len = strlen(default_value);
-        memcpy(value, default_value, len + 1);
-    }
-    return len;
-}
-
 uint32_t getProp(const char* str, uint32_t defaultValue) {
-    char buf[256];
-    if (property_get(str, buf, nullptr) > 0) {
-        return atoi(buf);
+    const std::string propStr = android::base::GetProperty(str, "");
+    if (propStr.size() > 0) {
+        return std::stoi(propStr);
     } else {
         return defaultValue;
     }
 }
-
 #endif  // NN_DEBUGGABLE
 
 } // namespace nn
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index aeb664d..da03559 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -44,7 +44,30 @@
 // The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
 const int kOEMCodeBase = 10000;
 
-// TODO Remove all the LOG(DEBUG) statements in all the files.
+/* IMPORTANT: if you change the following list, don't
+ * forget to update the corresponding 'tags' table in
+ * the initVLogMask() function implemented in Utils.cpp.
+ */
+enum VLogFlags {
+    MODEL = 0,
+    COMPILATION,
+    EXECUTION,
+    CPUEXE,
+    MANAGER,
+    DRIVER
+};
+
+#define VLOG_IS_ON(TAG) \
+    ((vLogMask & (1 << (TAG))) != 0)
+
+#define VLOG(TAG)         \
+    if (LIKELY(!VLOG_IS_ON(TAG))) \
+        ;                 \
+    else                  \
+        LOG(INFO)
+
+extern int vLogMask;
+void initVLogMask();
 
 // Assert macro, as Android does not generally support assert.
 #define nnAssert(v)                                                                            \
diff --git a/nn/common/operations/EmbeddingLookup.cpp b/nn/common/operations/EmbeddingLookup.cpp
index 624a1b4..2ca72af 100644
--- a/nn/common/operations/EmbeddingLookup.cpp
+++ b/nn/common/operations/EmbeddingLookup.cpp
@@ -38,7 +38,6 @@
 
   for (uint32_t i = 0; i < lookup_->shape().dimensions[0]; i++) {
     int idx = static_cast<int>((reinterpret_cast<float*>(lookup_->buffer))[i]);
-    LOG(INFO) << "idx=" << idx;
     if (idx >= row_size || idx < 0) {
       LOG(ERROR) << "Embedding Lookup: index out of bounds.";
       return false;
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index 08f3a18..902d4e8 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -31,7 +31,7 @@
 
 Return<ErrorStatus> SampleDriver::prepareModel(const Model& model,
                                                const sp<IPreparedModelCallback>& callback) {
-    LOG(DEBUG) << "prepareModel(" << toString(model) << ")"; // TODO errror
+    VLOG(DRIVER) << "prepareModel(" << toString(model) << ")"; // TODO error
     if (callback.get() == nullptr) {
         LOG(ERROR) << "invalid callback passed to prepareModel";
         return ErrorStatus::INVALID_ARGUMENT;
@@ -49,7 +49,7 @@
 }
 
 Return<DeviceStatus> SampleDriver::getStatus() {
-    LOG(DEBUG) << "getStatus()";
+    VLOG(DRIVER) << "getStatus()";
     return DeviceStatus::AVAILABLE;
 }
 
@@ -85,7 +85,7 @@
 
     CpuExecutor executor;
     int n = executor.run(mModel, request, poolInfo);
-    LOG(DEBUG) << "executor.run returned " << n;
+    VLOG(DRIVER) << "executor.run returned " << n;
     ErrorStatus executionStatus =
             n == ANEURALNETWORKS_NO_ERROR ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
     Return<void> returned = callback->notify(executionStatus);
@@ -96,7 +96,7 @@
 
 Return<ErrorStatus> SamplePreparedModel::execute(const Request& request,
                                                  const sp<IExecutionCallback>& callback) {
-    LOG(DEBUG) << "execute(" << toString(request) << ")";
+    VLOG(DRIVER) << "execute(" << toString(request) << ")";
     if (callback.get() == nullptr) {
         LOG(ERROR) << "invalid callback passed to execute";
         return ErrorStatus::INVALID_ARGUMENT;
diff --git a/nn/driver/sample/SampleDriverAll.cpp b/nn/driver/sample/SampleDriverAll.cpp
index c35e609..3e29481 100644
--- a/nn/driver/sample/SampleDriverAll.cpp
+++ b/nn/driver/sample/SampleDriverAll.cpp
@@ -37,7 +37,7 @@
 };
 
 Return<void> SampleDriverAll::getCapabilities(getCapabilities_cb cb) {
-    LOG(DEBUG) << "getCapabilities()";
+    VLOG(DRIVER) << "getCapabilities()";
     Capabilities capabilities = {.float32Performance = {.execTime = 1.1f, .powerUsage = 1.1f},
                                  .quantized8Performance = {.execTime = 1.1f, .powerUsage = 1.1f}};
     cb(ErrorStatus::NONE, capabilities);
@@ -46,7 +46,7 @@
 
 Return<void> SampleDriverAll::getSupportedOperations(const Model& model,
                                                      getSupportedOperations_cb cb) {
-    LOG(DEBUG) << "getSupportedOperations()";
+    VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count, true);
@@ -66,7 +66,7 @@
 using android::sp;
 
 int main() {
-    SetMinimumLogSeverity(android::base::VERBOSE);
+    android::nn::initVLogMask();
     sp<SampleDriverAll> driver(new SampleDriverAll());
     return driver->run();
 }
diff --git a/nn/driver/sample/SampleDriverFloatFast.cpp b/nn/driver/sample/SampleDriverFloatFast.cpp
index 3b9487a..4a2b749 100644
--- a/nn/driver/sample/SampleDriverFloatFast.cpp
+++ b/nn/driver/sample/SampleDriverFloatFast.cpp
@@ -37,7 +37,7 @@
 };
 
 Return<void> SampleDriverFloatFast::getCapabilities(getCapabilities_cb cb) {
-    LOG(DEBUG) << "getCapabilities()";
+    VLOG(DRIVER) << "getCapabilities()";
     Capabilities capabilities = {.float32Performance = {.execTime = 0.8f, .powerUsage = 1.2f},
                                  .quantized8Performance = {.execTime = 1.0f, .powerUsage = 1.0f}};
     cb(ErrorStatus::NONE, capabilities);
@@ -46,7 +46,7 @@
 
 Return<void> SampleDriverFloatFast::getSupportedOperations(const Model& model,
                                                            getSupportedOperations_cb cb) {
-    LOG(DEBUG) << "getSupportedOperations()";
+    VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
@@ -73,7 +73,7 @@
 using android::sp;
 
 int main() {
-    SetMinimumLogSeverity(android::base::VERBOSE);
+    android::nn::initVLogMask();
     sp<SampleDriverFloatFast> driver(new SampleDriverFloatFast());
     return driver->run();
 }
diff --git a/nn/driver/sample/SampleDriverFloatSlow.cpp b/nn/driver/sample/SampleDriverFloatSlow.cpp
index 26d2255..f472b9b 100644
--- a/nn/driver/sample/SampleDriverFloatSlow.cpp
+++ b/nn/driver/sample/SampleDriverFloatSlow.cpp
@@ -37,7 +37,7 @@
 };
 
 Return<void> SampleDriverFloatSlow::getCapabilities(getCapabilities_cb cb) {
-    LOG(DEBUG) << "getCapabilities()";
+    VLOG(DRIVER) << "getCapabilities()";
     Capabilities capabilities = {.float32Performance = {.execTime = 1.3f, .powerUsage = 0.7f},
                                  .quantized8Performance = {.execTime = 1.0f, .powerUsage = 1.0f}};
     cb(ErrorStatus::NONE, capabilities);
@@ -46,7 +46,7 @@
 
 Return<void> SampleDriverFloatSlow::getSupportedOperations(const Model& model,
                                                            getSupportedOperations_cb cb) {
-    LOG(DEBUG) << "getSupportedOperations()";
+    VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
@@ -73,7 +73,7 @@
 using android::sp;
 
 int main() {
-    SetMinimumLogSeverity(android::base::VERBOSE);
+    android::nn::initVLogMask();
     sp<SampleDriverFloatSlow> driver(new SampleDriverFloatSlow());
     return driver->run();
 }
diff --git a/nn/driver/sample/SampleDriverMinimal.cpp b/nn/driver/sample/SampleDriverMinimal.cpp
index fed0210..919ac1c 100644
--- a/nn/driver/sample/SampleDriverMinimal.cpp
+++ b/nn/driver/sample/SampleDriverMinimal.cpp
@@ -38,7 +38,7 @@
 };
 
 Return<void> SampleDriverMinimal::getCapabilities(getCapabilities_cb cb) {
-    LOG(DEBUG) << "getCapabilities()";
+    VLOG(DRIVER) << "getCapabilities()";
     Capabilities capabilities = {.float32Performance = {.execTime = 0.4f, .powerUsage = 0.5f},
                                  .quantized8Performance = {.execTime = 1.0f, .powerUsage = 1.0f}};
     cb(ErrorStatus::NONE, capabilities);
@@ -47,7 +47,7 @@
 
 Return<void> SampleDriverMinimal::getSupportedOperations(const Model& model,
                                                          getSupportedOperations_cb cb) {
-    LOG(DEBUG) << "getSupportedOperations()";
+    VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
@@ -85,7 +85,7 @@
 using android::sp;
 
 int main() {
-    SetMinimumLogSeverity(android::base::VERBOSE);
+    android::nn::initVLogMask();
     sp<SampleDriverMinimal> driver(new SampleDriverMinimal());
     return driver->run();
 }
diff --git a/nn/driver/sample/SampleDriverQuant.cpp b/nn/driver/sample/SampleDriverQuant.cpp
index 2f8dfdc..2695416 100644
--- a/nn/driver/sample/SampleDriverQuant.cpp
+++ b/nn/driver/sample/SampleDriverQuant.cpp
@@ -37,7 +37,7 @@
 };
 
 Return<void> SampleDriverQuant::getCapabilities(getCapabilities_cb cb) {
-    LOG(DEBUG) << "getCapabilities()";
+    VLOG(DRIVER) << "getCapabilities()";
     Capabilities capabilities = {.float32Performance = {.execTime = 50.0f, .powerUsage = 1.0f},
                                  .quantized8Performance = {.execTime = 50.0f, .powerUsage = 1.0f}};
     cb(ErrorStatus::NONE, capabilities);
@@ -46,7 +46,7 @@
 
 Return<void> SampleDriverQuant::getSupportedOperations(const Model& model,
                                                        getSupportedOperations_cb cb) {
-    LOG(DEBUG) << "getSupportedOperations()";
+    VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
@@ -73,7 +73,7 @@
 using android::sp;
 
 int main() {
-    SetMinimumLogSeverity(android::base::VERBOSE);
+    android::nn::initVLogMask();
     sp<SampleDriverQuant> driver(new SampleDriverQuant());
     return driver->run();
 }
diff --git a/nn/runtime/CompilationBuilder.cpp b/nn/runtime/CompilationBuilder.cpp
index e51aafe..a1e1a3f 100644
--- a/nn/runtime/CompilationBuilder.cpp
+++ b/nn/runtime/CompilationBuilder.cpp
@@ -29,7 +29,7 @@
 
 CompilationBuilder::CompilationBuilder(const ModelBuilder* model) :
     mModel(model) {
-    LOG(DEBUG) << "CompilationBuilder::CompilationBuilder";
+    VLOG(COMPILATION) << "CompilationBuilder::CompilationBuilder";
 }
 
 int CompilationBuilder::finish() {
diff --git a/nn/runtime/ExecutionBuilder.cpp b/nn/runtime/ExecutionBuilder.cpp
index d43f6a3..56dc723 100644
--- a/nn/runtime/ExecutionBuilder.cpp
+++ b/nn/runtime/ExecutionBuilder.cpp
@@ -98,7 +98,7 @@
         mInputs(mModel->inputCount()),
         mOutputs(mModel->outputCount()),
         mMemories(mModel->getMemories()) {
-    LOG(DEBUG) << "ExecutionBuilder::ExecutionBuilder";
+    VLOG(EXECUTION) << "ExecutionBuilder::ExecutionBuilder";
 }
 
 int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type,
@@ -186,7 +186,7 @@
 // Ensure that executionCallback->notify() is called.
 static void cpuFallbackFull(const ExecutionBuilder* executionBuilder,
                             const sp<ExecutionCallback>& executionCallback) {
-    LOG(DEBUG) << "cpuFallbackFull";
+    VLOG(EXECUTION) << "cpuFallbackFull";
     StepExecutor executor(executionBuilder, executionBuilder->getModel(),
                           nullptr /* no IDevice, so CPU */,
                           nullptr /* no IPreparedModel */);
@@ -210,7 +210,7 @@
                                const ExecutionPlan* plan,
                                std::shared_ptr<ExecutionPlan::Controller> controller,
                                const sp<ExecutionCallback>& executionCallback) {
-    LOG(DEBUG) << "cpuFallbackPartial";
+    VLOG(EXECUTION) << "cpuFallbackPartial";
     std::shared_ptr<StepExecutor> executor;
     int n = plan->fallback(controller, &executor);
     if (n != ANEURALNETWORKS_NO_ERROR || executor->isCpu()) {
@@ -235,10 +235,10 @@
                                          std::shared_ptr<ExecutionPlan::Controller> controller,
                                          bool allowFallback,
                                          const sp<ExecutionCallback>& executionCallback) {
-    LOG(DEBUG) << "ExecutionBuilder::startCompute (from plan, iteratively)";
+    VLOG(EXECUTION) << "ExecutionBuilder::startCompute (from plan, iteratively)";
     while (true) {
         std::shared_ptr<StepExecutor> executor;
-        LOG(DEBUG) << "looking for next StepExecutor";
+        VLOG(EXECUTION) << "looking for next StepExecutor";
         int n = plan->next(controller, &executor);
         if (n != ANEURALNETWORKS_NO_ERROR) {
             if (allowFallback) {
@@ -358,10 +358,10 @@
         const std::vector<std::shared_ptr<Device>>& devices = DeviceManager::get()->getDrivers();
         for (const auto& device : devices) {
             hidl_vec<bool> supports;
-            LOG(DEBUG) << "Checking " << device->getName();
+            VLOG(EXECUTION) << "Checking " << device->getName();
             device->getSupportedOperations(hidlModel, &supports);
             if (std::find(supports.begin(), supports.end(), false) == supports.end()) {
-                LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on " << device->getName();
+                VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on " << device->getName();
                 StepExecutor executor(this, mModel, device->getInterface(),
                                       nullptr /* no IPreparedModel, so compile */);
                 executor.mapInputsAndOutputsTrivially();
@@ -372,7 +372,7 @@
 #endif  // DISABLE_PARTITIONED_EXECUTION
 
     // Run on the CPU.
-    LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on CPU";
+    VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on CPU";
     StepExecutor executor(this, mModel,
                           nullptr /* no IDevice, so CPU */,
                           nullptr /* no IPreparedModel */);
@@ -556,7 +556,7 @@
     // in the design document.
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
 
-    LOG(DEBUG) << "Before mPreparedModel->execute() " << toString(request);
+    VLOG(EXECUTION) << "Before mPreparedModel->execute() " << toString(request);
     // Execute.
     // TODO: What happens to the Callback if the service dies abnormally
     // -- won't that keep the Callback live forever, because the service
@@ -565,7 +565,7 @@
     // it seems like this is a small memory leak, if the Callback stays
     // alive forever.
     if (mPreparedModel->execute(request, executionCallback) != ErrorStatus::NONE) {
-        LOG(DEBUG) << "**Execute failed**";
+        VLOG(EXECUTION) << "**Execute failed**";
         return ANEURALNETWORKS_OP_FAILED;
     }
 
@@ -574,7 +574,7 @@
     executionCallback->wait();
     Return<ErrorStatus> executionStatus = executionCallback->getStatus();
     if (!executionStatus.isOk() || executionStatus != ErrorStatus::NONE) {
-        LOG(DEBUG) << "**Execute async failed**";
+        VLOG(EXECUTION) << "**Execute async failed**";
         return ANEURALNETWORKS_OP_FAILED;
     }
 
@@ -593,7 +593,7 @@
             memcpy(info.buffer, data + loc.offset, loc.length);
         }
     }
-    LOG(DEBUG) << "StepExecutor::startComputeOnDevice completed";
+    VLOG(EXECUTION) << "StepExecutor::startComputeOnDevice completed";
 
     *synchronizationCallback = executionCallback;
     return ANEURALNETWORKS_NO_ERROR;
diff --git a/nn/runtime/ExecutionPlan.cpp b/nn/runtime/ExecutionPlan.cpp
index 5318ccd..d2f74d5 100644
--- a/nn/runtime/ExecutionPlan.cpp
+++ b/nn/runtime/ExecutionPlan.cpp
@@ -282,7 +282,7 @@
 }
 
 int ExecutionStep::finishSubModel(const ModelBuilder* fromModel, bool* hasOutputOfUnknownSize) {
-    LOG(DEBUG) << "ExecutionStep::finishSubModel, step " << mIndex;
+    VLOG(COMPILATION) << "ExecutionStep::finishSubModel, step " << mIndex;
 
     auto convertModelInputsOrOutputs = [](
             // IN: mModel{Inputs|Outputs}
@@ -328,8 +328,9 @@
         for (uint32_t dimension : operand.dimensions) {
             if (dimension == 0) {
                 *hasOutputOfUnknownSize = true;
-                LOG(DEBUG) << "SubModelOutput (operand#" << subModelOutput.first << " of original graph)"
-                           << " has unknown size: " << toString(operand);
+                VLOG(COMPILATION) << "SubModelOutput (operand#" << subModelOutput.first
+                                << " of original graph) has unknown size: "
+                                << toString(operand);
                 break;
             }
         }
@@ -352,16 +353,16 @@
         return ANEURALNETWORKS_NO_ERROR;
     }
 
-    LOG(DEBUG) << "ExecutionStep::finishSubModel, compilation";
+    VLOG(COMPILATION) << "ExecutionStep::finishSubModel, compilation";
     return compile(mDevice, mSubModel.get(), &mPreparedSubModel);
 }
 
 void ExecutionStep::dump() const {
     Model model;
     mSubModel->setHidlModel(&model);
-    LOG(DEBUG) << "ExecutionStep#" << mIndex
-               << " for " << (mDevice == nullptr ? "CPU" : mDevice->getName())
-               << " submodel: " << toString(model);
+    VLOG(COMPILATION) << "ExecutionStep#" << mIndex
+                      << " for " << (mDevice == nullptr ? "CPU" : mDevice->getName())
+                      << " submodel: " << toString(model);
 }
 
 int ExecutionPlan::CompoundBody::finish(const ModelBuilder* fromModel) {
@@ -369,12 +370,12 @@
     for (const auto& step : mSteps) {
         int n = step->finishSubModel(fromModel, &mHasSubModelOutputOfUnknownSize);
         if (n != ANEURALNETWORKS_NO_ERROR) {
-            LOG(DEBUG) << "ExecutionPlan::CompoundBody::finish -- finishSubModel failed";
+            VLOG(COMPILATION) << "ExecutionPlan::CompoundBody::finish -- finishSubModel failed";
             return n;
         }
     }
     if (mHasSubModelOutputOfUnknownSize) {
-        LOG(DEBUG) << "ExecutionPlan::CompoundBody::finish -- mHasSubModelOutputOfUnknownSize";
+        VLOG(COMPILATION) << "ExecutionPlan::CompoundBody::finish -- mHasSubModelOutputOfUnknownSize";
         return ANEURALNETWORKS_OP_FAILED;
     }
 
@@ -388,7 +389,7 @@
         return ANEURALNETWORKS_NO_ERROR;
     }
 
-    LOG(DEBUG) << "ExecutionPlan::SimpleBody::finish, compilation";
+    VLOG(COMPILATION) << "ExecutionPlan::SimpleBody::finish, compilation";
     const int n = compile(mDevice, mModel, &mPreparedModel);
     mSuccessfulFinish = (n == ANEURALNETWORKS_NO_ERROR);
     return n;
@@ -418,7 +419,7 @@
     const ExecutionBuilder* executionBuilder) const {
     nnAssert((mState == EMPTY) == (mBody == nullptr));
     if (mBody && !mBody->mSuccessfulFinish) {
-        LOG(DEBUG) << "ExecutionPlan::makeController -- unsuccessful finish";
+        VLOG(EXECUTION) << "ExecutionPlan::makeController -- unsuccessful finish";
         return std::shared_ptr<Controller>(nullptr);
     }
 
@@ -474,8 +475,8 @@
                             std::shared_ptr<StepExecutor>* executor) const {
     *executor = nullptr;
 
-    LOG(DEBUG) << "ExecutionPlan::fallback(" << controller << ", " << executor
-               << "): mNextStepIndex = " << controller->mNextStepIndex;
+    VLOG(EXECUTION) << "ExecutionPlan::fallback(" << controller << ", " << executor
+                    << "): mNextStepIndex = " << controller->mNextStepIndex;
 
     if (controller->mNextStepIndex == 0) {
         // We haven't called next().
@@ -495,8 +496,8 @@
                         std::shared_ptr<StepExecutor>* executor) const {
     *executor = nullptr;
 
-    LOG(DEBUG) << "ExecutionPlan::next(" << controller << ", " << executor
-               << "): mNextStepIndex = " << controller->mNextStepIndex;
+    VLOG(EXECUTION) << "ExecutionPlan::next(" << controller << ", " << executor
+                    << "): mNextStepIndex = " << controller->mNextStepIndex;
 
     if (controller->mNextStepIndex == Controller::kBadStepIndex) {
         return ANEURALNETWORKS_OP_FAILED;
@@ -614,7 +615,7 @@
     if (mBody) {
         mBody->dump();
     } else {
-        LOG(DEBUG) << "EMPTY";
+        VLOG(COMPILATION) << "EMPTY";
     }
 }
 
@@ -644,7 +645,7 @@
 }
 
 void ExecutionPlan::SimpleBody::dump() const {
-    LOG(DEBUG) << "SIMPLE for " << (mDevice == nullptr ? "CPU" : mDevice->getName());
+    VLOG(COMPILATION) << "SIMPLE for " << (mDevice == nullptr ? "CPU" : mDevice->getName());
 }
 
 void ExecutionPlan::CompoundBody::dump() const {
@@ -664,8 +665,8 @@
     const size_t deviceCount = nonCpuDeviceCount + 1;
     const size_t operationCount = mOperations.size();
 
-    LOG(DEBUG) << "ModelBuilder::partitionTheWork: deviceCount = " << deviceCount
-               << ", operationCount = " << operationCount;
+    VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: deviceCount = " << deviceCount
+                      << ", operationCount = " << operationCount;
 
     // If we only have the CPU, or if the graph has no operations, no
     // need to try to partition.
@@ -686,11 +687,9 @@
                            std::not_equal_to<int>()) == bestDeviceForOperation.end()) {
         const int bestDeviceIndex = bestDeviceForOperation[0];
         const bool cpu = (size_t(bestDeviceIndex) == deviceCount - 1);
-        if (WOULD_LOG(DEBUG)) {
-            LOG(DEBUG) << "ModelBuilder::partitionTheWork: only one best device: "
-                       << bestDeviceIndex << " = "
-                       << (cpu ? "CPU" : devices[bestDeviceIndex]->getName());
-        }
+        VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: only one best device: "
+                          << bestDeviceIndex << " = "
+                          << (cpu ? "CPU" : devices[bestDeviceIndex]->getName());
         plan->becomeSingleStep(cpu ? nullptr : devices[bestDeviceIndex], this);
         return plan->finish(this);
     }
@@ -704,7 +703,8 @@
     auto enqueueOnAppropriateDevice = [&](uint32_t operationIndex) {
         int deviceIndex = bestDeviceForOperation[operationIndex];
         perDeviceQueue[deviceIndex].push(operationIndex);
-        LOG(DEBUG) << "enqueueOnAppropriateDevice " << operationIndex << " onto " << deviceIndex;
+        VLOG(COMPILATION) << "enqueueOnAppropriateDevice " << operationIndex << " onto "
+                          << deviceIndex;
     };
 
     // This helper function finds a device that has operations ready to process.
@@ -726,7 +726,7 @@
     while (true) {
         // Find the device we'll do this step for.
         int deviceIndex = findNextDeviceToProcess();
-        LOG(DEBUG) << "findNextDeviceToProcess: " << deviceIndex;
+        VLOG(COMPILATION) << "findNextDeviceToProcess: " << deviceIndex;
         if (deviceIndex < 0) {
             break;
         }
@@ -747,10 +747,11 @@
     }
 
     int n = plan->finish(this);
-    if (WOULD_LOG(DEBUG)) {
+    if (VLOG_IS_ON(COMPILATION)) {
         Model model;
         setHidlModel(&model);
-        LOG(DEBUG) << "ModelBuilder::partitionTheWork: original model: " << toString(model);
+        VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: original model: "
+                          << toString(model);
         plan->dump();
     }
     return n;
@@ -846,10 +847,10 @@
         // TODO What if it is an OEM op?
         (*bestDeviceForOperation)[operationIndex] =
                 bestChoice >= 0 ? bestChoice : static_cast<int>(nonCpuDeviceCount);
-        LOG(VERBOSE) << "ModelBuilder::findBestDeviceForEachOperation("
-                     << toString(getOperation(operationIndex).type)
-                     << ") = "
-                     << (*bestDeviceForOperation)[operationIndex];
+        VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
+                          << toString(getOperation(operationIndex).type)
+                          << ") = "
+                          << (*bestDeviceForOperation)[operationIndex];
     }
     return ANEURALNETWORKS_NO_ERROR;
 }
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index 42e0eed..25ccb21 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -44,8 +44,8 @@
         if (status != ErrorStatus::NONE) {
             LOG(ERROR) << "IDevice::getCapabilities returned the error " << toString(status);
         }
-        LOG(DEBUG) << "Capab " << capabilities.float32Performance.execTime;
-        LOG(DEBUG) << "Capab " << capabilities.quantized8Performance.execTime;
+        VLOG(MANAGER) << "Capab " << capabilities.float32Performance.execTime;
+        VLOG(MANAGER) << "Capab " << capabilities.quantized8Performance.execTime;
         mFloat32Performance = capabilities.float32Performance;
         mQuantized8Performance = capabilities.quantized8Performance;
     });
@@ -118,7 +118,7 @@
 void DeviceManager::findAvailableDevices() {
     using ::android::hardware::neuralnetworks::V1_0::IDevice;
     using ::android::hidl::manager::V1_0::IServiceManager;
-    LOG(DEBUG) << "findAvailableDevices";
+    VLOG(MANAGER) << "findAvailableDevices";
 
     sp<IServiceManager> manager = hardware::defaultServiceManager();
     if (manager == nullptr) {
@@ -128,7 +128,7 @@
 
     manager->listByInterface(IDevice::descriptor, [this](const hidl_vec<hidl_string>& names) {
         for (const auto& name : names) {
-            LOG(DEBUG) << "Found interface " << name.c_str();
+            VLOG(MANAGER) << "Found interface " << name.c_str();
             sp<IDevice> device = IDevice::getService(name);
             if (device == nullptr) {
                 LOG(ERROR) << "Got a null IDEVICE for " << name.c_str();
@@ -140,7 +140,7 @@
 }
 
 DeviceManager::DeviceManager() {
-    LOG(VERBOSE) << "DeviceManager::DeviceManager";
+    VLOG(MANAGER) << "DeviceManager::DeviceManager";
     findAvailableDevices();
 #ifdef NN_DEBUGGABLE
     mPartitioning = getProp("debug.nn.partition", kPartitioningDefault);
diff --git a/nn/runtime/NeuralNetworks.cpp b/nn/runtime/NeuralNetworks.cpp
index f9785c2..979ca7f 100644
--- a/nn/runtime/NeuralNetworks.cpp
+++ b/nn/runtime/NeuralNetworks.cpp
@@ -235,6 +235,7 @@
 }
 
 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
+    initVLogMask();
     if (!model) {
         LOG(ERROR) << "ANeuralNetworksModel_create passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
diff --git a/nn/runtime/test/TestGenerated.cpp b/nn/runtime/test/TestGenerated.cpp
index 4c794aa..4602699 100644
--- a/nn/runtime/test/TestGenerated.cpp
+++ b/nn/runtime/test/TestGenerated.cpp
@@ -158,10 +158,7 @@
 
 class GeneratedTests : public ::testing::Test {
    protected:
-    virtual void SetUp() {
-        // For detailed logs, uncomment this line:
-        // SetMinimumLogSeverity(android::base::VERBOSE);
-    }
+    virtual void SetUp() {}
 };
 
 // Testcases generated from runtime/test/specs/*.mod.py
diff --git a/nn/runtime/test/TestMain.cpp b/nn/runtime/test/TestMain.cpp
index 8445ac7..29d8076 100644
--- a/nn/runtime/test/TestMain.cpp
+++ b/nn/runtime/test/TestMain.cpp
@@ -16,7 +16,6 @@
 
 #include "Manager.h"
 #include "NeuralNetworksWrapper.h"
-//#include "SampleDriver.h"
 #include "Utils.h"
 
 #include <gtest/gtest.h>
@@ -26,6 +25,7 @@
 int main(int argc, char** argv) {
     ::testing::InitGoogleTest(&argc, argv);
 
+    android::nn::initVLogMask();
     // Test with the installed drivers.
     int n1 = RUN_ALL_TESTS();
 
diff --git a/nn/runtime/test/TestMemory.cpp b/nn/runtime/test/TestMemory.cpp
index 30785db..74931ca 100644
--- a/nn/runtime/test/TestMemory.cpp
+++ b/nn/runtime/test/TestMemory.cpp
@@ -32,10 +32,7 @@
 // Tests the various ways to pass weights and input/output data.
 class MemoryTest : public ::testing::Test {
 protected:
-    virtual void SetUp() {
-        // For detailed logs, uncomment this line:
-        // SetMinimumLogSeverity(android::base::VERBOSE);
-    }
+    virtual void SetUp() {}
 
     const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
     const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
diff --git a/nn/runtime/test/TestTrivialModel.cpp b/nn/runtime/test/TestTrivialModel.cpp
index 14935e3..1eda104 100644
--- a/nn/runtime/test/TestTrivialModel.cpp
+++ b/nn/runtime/test/TestTrivialModel.cpp
@@ -28,10 +28,7 @@
 
 class TrivialTest : public ::testing::Test {
 protected:
-    virtual void SetUp() {
-        // For detailed logs, uncomment this line:
-        // SetMinimumLogSeverity(android::base::VERBOSE);
-    }
+    virtual void SetUp() {}
 
     const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
     const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
diff --git a/nn/runtime/test/TestValidation.cpp b/nn/runtime/test/TestValidation.cpp
index 4ac8fc1..d7056c9 100644
--- a/nn/runtime/test/TestValidation.cpp
+++ b/nn/runtime/test/TestValidation.cpp
@@ -26,10 +26,7 @@
 namespace {
 class ValidationTest : public ::testing::Test {
 protected:
-    virtual void SetUp() {
-        // For detailed logs, uncomment this line:
-        // SetMinimumLogSeverity(android::base::VERBOSE);
-    }
+    virtual void SetUp() {}
 };
 
 class ValidationTestModel : public ValidationTest {