Decouple Burst types from 1.3 types

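ExecutionBurstController.h and ExecutionBurstServer.h no longer depend
on the version-agnostic aliases from HalInterfaces.h. The headers now
include the 1.0/1.1/1.2 HAL headers directly and spell out fully
qualified types (e.g. hardware::neuralnetworks::V1_2::FmqRequestDatum),
the burst/FMQ aliases are removed from HalInterfaces.h, and the .cpp
files keep local using-declarations for the V1_2 types. A new
legacyConvertResultCodeToErrorStatus() helper returns the result-code
conversion as a V1_0::ErrorStatus.

Illustrative caller-side sketch (not part of this change; the helper
name makeEmptyResultPacket and the local noTiming constant are made up
for the example). It shows the fully qualified V1_0/V1_2 types now
expected by the serialize() overload declared in ExecutionBurstServer.h:

  #include <cstdint>
  #include <limits>
  #include <vector>

  #include "ExecutionBurstServer.h"

  namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
  namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;

  // Build a result packet reporting success, with no output shapes and
  // no timing information (both timing fields set to UINT64_MAX).
  std::vector<V1_2::FmqResultDatum> makeEmptyResultPacket() {
      const V1_2::Timing noTiming = {std::numeric_limits<uint64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()};
      return ::android::nn::serialize(V1_0::ErrorStatus::NONE,
                                      /*outputShapes=*/{}, noTiming);
  }
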
Bug: 141718368
Test: mma
Test: NeuralNetworksTest_static
Change-Id: Ic3890343154ba734d42521239e63c0fa2315f42c
diff --git a/nn/common/ExecutionBurstController.cpp b/nn/common/ExecutionBurstController.cpp
index eb956de..8463df8 100644
--- a/nn/common/ExecutionBurstController.cpp
+++ b/nn/common/ExecutionBurstController.cpp
@@ -29,6 +29,7 @@
 #include <utility>
 #include <vector>
 
+#include "HalInterfaces.h"
 #include "Tracing.h"
 #include "Utils.h"
 
@@ -37,6 +38,10 @@
 
 using namespace hal;
 
+using V1_2::FmqRequestDatum;
+using V1_2::FmqResultDatum;
+using V1_2::IBurstCallback;
+using V1_2::IBurstContext;
 using FmqRequestDescriptor = hardware::MQDescriptorSync<FmqRequestDatum>;
 using FmqResultDescriptor = hardware::MQDescriptorSync<FmqResultDatum>;
 
@@ -226,6 +231,10 @@
     return std::make_tuple(errorStatus, std::move(outputShapes), timing);
 }
 
+V1_0::ErrorStatus legacyConvertResultCodeToErrorStatus(int resultCode) {
+    return convertToV1_0(convertResultCodeToErrorStatus(resultCode));
+}
+
 std::pair<std::unique_ptr<ResultChannelReceiver>, const FmqResultDescriptor*>
 ResultChannelReceiver::create(size_t channelLength, std::chrono::microseconds pollingTimeWindow) {
     std::unique_ptr<FmqResultChannel> fmqResultChannel =
diff --git a/nn/common/ExecutionBurstServer.cpp b/nn/common/ExecutionBurstServer.cpp
index 2b48ada..583ebf5 100644
--- a/nn/common/ExecutionBurstServer.cpp
+++ b/nn/common/ExecutionBurstServer.cpp
@@ -29,6 +29,7 @@
 #include <utility>
 #include <vector>
 
+#include "HalInterfaces.h"
 #include "Tracing.h"
 
 namespace android::nn {
@@ -37,6 +38,10 @@
 using namespace hal;
 
 using hardware::MQDescriptorSync;
+using V1_2::FmqRequestDatum;
+using V1_2::FmqResultDatum;
+using V1_2::IBurstCallback;
+using V1_2::IBurstContext;
 
 constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
                               std::numeric_limits<uint64_t>::max()};
diff --git a/nn/common/include/ExecutionBurstController.h b/nn/common/include/ExecutionBurstController.h
index e8f3657..2a4de7d 100644
--- a/nn/common/include/ExecutionBurstController.h
+++ b/nn/common/include/ExecutionBurstController.h
@@ -18,6 +18,12 @@
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_EXECUTION_BURST_CONTROLLER_H
 
 #include <android-base/macros.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IBurstCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IBurstContext.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
 #include <fmq/MessageQueue.h>
 #include <hidl/MQDescriptor.h>
 
@@ -31,8 +37,6 @@
 #include <utility>
 #include <vector>
 
-#include "HalInterfaces.h"
-
 namespace android::nn {
 
 /**
@@ -51,9 +55,9 @@
  *     request.
  * @return Serialized FMQ request data.
  */
-std::vector<hal::FmqRequestDatum> serialize(const hal::V1_0::Request& request,
-                                            hal::MeasureTiming measure,
-                                            const std::vector<int32_t>& slots);
+std::vector<hardware::neuralnetworks::V1_2::FmqRequestDatum> serialize(
+        const hardware::neuralnetworks::V1_0::Request& request,
+        hardware::neuralnetworks::V1_2::MeasureTiming measure, const std::vector<int32_t>& slots);
 
 /**
  * Deserialize the FMQ result data.
@@ -64,8 +68,18 @@
  * @param data Serialized FMQ result data.
  * @return Result object if successfully deserialized, std::nullopt otherwise.
  */
-std::optional<std::tuple<hal::V1_0::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
-deserialize(const std::vector<hal::FmqResultDatum>& data);
+std::optional<std::tuple<hardware::neuralnetworks::V1_0::ErrorStatus,
+                         std::vector<hardware::neuralnetworks::V1_2::OutputShape>,
+                         hardware::neuralnetworks::V1_2::Timing>>
+deserialize(const std::vector<hardware::neuralnetworks::V1_2::FmqResultDatum>& data);
+
+/**
+ * Convert result code to error status.
+ *
+ * @param resultCode Result code to be converted.
+ * @return ErrorStatus Resultant error status.
+ */
+hardware::neuralnetworks::V1_0::ErrorStatus legacyConvertResultCodeToErrorStatus(int resultCode);
 
 /**
  * ResultChannelReceiver is responsible for waiting on the channel until the
@@ -77,9 +91,10 @@
  * invalidated, unblocking the receiver.
  */
 class ResultChannelReceiver {
-    using FmqResultDescriptor = hardware::MQDescriptorSync<hal::FmqResultDatum>;
-    using FmqResultChannel =
-            hardware::MessageQueue<hal::FmqResultDatum, hardware::kSynchronizedReadWrite>;
+    using FmqResultDescriptor =
+            hardware::MQDescriptorSync<hardware::neuralnetworks::V1_2::FmqResultDatum>;
+    using FmqResultChannel = hardware::MessageQueue<hardware::neuralnetworks::V1_2::FmqResultDatum,
+                                                    hardware::kSynchronizedReadWrite>;
 
    public:
     /**
@@ -108,7 +123,9 @@
      * @return Result object if successfully received, std::nullopt if error or
      *     if the receiver object was invalidated.
      */
-    std::optional<std::tuple<hal::V1_0::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
+    std::optional<std::tuple<hardware::neuralnetworks::V1_0::ErrorStatus,
+                             std::vector<hardware::neuralnetworks::V1_2::OutputShape>,
+                             hardware::neuralnetworks::V1_2::Timing>>
     getBlocking();
 
     /**
@@ -118,7 +135,7 @@
     void invalidate();
 
     // prefer calling ResultChannelReceiver::getBlocking
-    std::optional<std::vector<hal::FmqResultDatum>> getPacketBlocking();
+    std::optional<std::vector<hardware::neuralnetworks::V1_2::FmqResultDatum>> getPacketBlocking();
 
     ResultChannelReceiver(std::unique_ptr<FmqResultChannel> fmqResultChannel,
                           std::chrono::microseconds pollingTimeWindow);
@@ -135,9 +152,11 @@
  * available.
  */
 class RequestChannelSender {
-    using FmqRequestDescriptor = hardware::MQDescriptorSync<hal::FmqRequestDatum>;
+    using FmqRequestDescriptor =
+            hardware::MQDescriptorSync<hardware::neuralnetworks::V1_2::FmqRequestDatum>;
     using FmqRequestChannel =
-            hardware::MessageQueue<hal::FmqRequestDatum, hardware::kSynchronizedReadWrite>;
+            hardware::MessageQueue<hardware::neuralnetworks::V1_2::FmqRequestDatum,
+                                   hardware::kSynchronizedReadWrite>;
 
    public:
     /**
@@ -161,7 +180,8 @@
      *     the request.
      * @return 'true' on successful send, 'false' otherwise.
      */
-    bool send(const hal::V1_0::Request& request, hal::MeasureTiming measure,
+    bool send(const hardware::neuralnetworks::V1_0::Request& request,
+              hardware::neuralnetworks::V1_2::MeasureTiming measure,
               const std::vector<int32_t>& slots);
 
     /**
@@ -172,7 +192,7 @@
     void invalidate();
 
     // prefer calling RequestChannelSender::send
-    bool sendPacket(const std::vector<hal::FmqRequestDatum>& packet);
+    bool sendPacket(const std::vector<hardware::neuralnetworks::V1_2::FmqRequestDatum>& packet);
 
     RequestChannelSender(std::unique_ptr<FmqRequestChannel> fmqRequestChannel);
 
@@ -206,14 +226,14 @@
      * efficiency, if two hidl_memory objects represent the same underlying
      * buffer, they must use the same key.
      */
-    class ExecutionBurstCallback : public hal::IBurstCallback {
+    class ExecutionBurstCallback : public hardware::neuralnetworks::V1_2::IBurstCallback {
         DISALLOW_COPY_AND_ASSIGN(ExecutionBurstCallback);
 
        public:
         ExecutionBurstCallback() = default;
 
-        hal::Return<void> getMemories(const hal::hidl_vec<int32_t>& slots,
-                                      getMemories_cb cb) override;
+        hardware::Return<void> getMemories(const hardware::hidl_vec<int32_t>& slots,
+                                           getMemories_cb cb) override;
 
         /**
          * This function performs one of two different actions:
@@ -231,7 +251,7 @@
          * @return Unique slot identifiers where each returned slot element
          *     corresponds to a memory resource element in "memories".
          */
-        std::vector<int32_t> getSlots(const hal::hidl_vec<hal::hidl_memory>& memories,
+        std::vector<int32_t> getSlots(const hardware::hidl_vec<hardware::hidl_memory>& memories,
                                       const std::vector<intptr_t>& keys);
 
         /*
@@ -249,13 +269,13 @@
         std::pair<bool, int32_t> freeMemory(intptr_t key);
 
        private:
-        int32_t getSlotLocked(const hal::hidl_memory& memory, intptr_t key);
+        int32_t getSlotLocked(const hardware::hidl_memory& memory, intptr_t key);
         int32_t allocateSlotLocked();
 
         std::mutex mMutex;
         std::stack<int32_t, std::vector<int32_t>> mFreeSlots;
         std::map<intptr_t, int32_t> mMemoryIdToSlot;
-        std::vector<hal::hidl_memory> mMemoryCache;
+        std::vector<hardware::hidl_memory> mMemoryCache;
     };
 
     /**
@@ -271,15 +291,15 @@
      * @return ExecutionBurstController Execution burst controller object.
      */
     static std::unique_ptr<ExecutionBurstController> create(
-            const sp<hal::V1_2::IPreparedModel>& preparedModel,
+            const sp<hardware::neuralnetworks::V1_2::IPreparedModel>& preparedModel,
             std::chrono::microseconds pollingTimeWindow);
 
     // prefer calling ExecutionBurstController::create
     ExecutionBurstController(const std::shared_ptr<RequestChannelSender>& requestChannelSender,
                              const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver,
-                             const sp<hal::IBurstContext>& burstContext,
+                             const sp<hardware::neuralnetworks::V1_2::IBurstContext>& burstContext,
                              const sp<ExecutionBurstCallback>& callback,
-                             const sp<hal::hidl_death_recipient>& deathHandler = nullptr);
+                             const sp<hardware::hidl_death_recipient>& deathHandler = nullptr);
 
     // explicit destructor to unregister the death recipient
     ~ExecutionBurstController();
@@ -298,8 +318,10 @@
      *     - whether or not a failed burst execution should be re-run using a
      *       different path (e.g., IPreparedModel::executeSynchronously)
      */
-    std::tuple<int, std::vector<hal::OutputShape>, hal::Timing, bool> compute(
-            const hal::V1_0::Request& request, hal::MeasureTiming measure,
+    std::tuple<int, std::vector<hardware::neuralnetworks::V1_2::OutputShape>,
+               hardware::neuralnetworks::V1_2::Timing, bool>
+    compute(const hardware::neuralnetworks::V1_0::Request& request,
+            hardware::neuralnetworks::V1_2::MeasureTiming measure,
             const std::vector<intptr_t>& memoryIds);
 
     /**
@@ -313,9 +335,9 @@
     std::mutex mMutex;
     const std::shared_ptr<RequestChannelSender> mRequestChannelSender;
     const std::shared_ptr<ResultChannelReceiver> mResultChannelReceiver;
-    const sp<hal::IBurstContext> mBurstContext;
+    const sp<hardware::neuralnetworks::V1_2::IBurstContext> mBurstContext;
     const sp<ExecutionBurstCallback> mMemoryCache;
-    const sp<hal::hidl_death_recipient> mDeathHandler;
+    const sp<hardware::hidl_death_recipient> mDeathHandler;
 };
 
 }  // namespace android::nn
diff --git a/nn/common/include/ExecutionBurstServer.h b/nn/common/include/ExecutionBurstServer.h
index 2a0dfba..7b3f82d 100644
--- a/nn/common/include/ExecutionBurstServer.h
+++ b/nn/common/include/ExecutionBurstServer.h
@@ -18,6 +18,11 @@
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_EXECUTION_BURST_SERVER_H
 
 #include <android-base/macros.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IBurstCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
 #include <fmq/MessageQueue.h>
 #include <hidl/MQDescriptor.h>
 
@@ -29,12 +34,12 @@
 #include <tuple>
 #include <vector>
 
-#include "HalInterfaces.h"
-
 namespace android::nn {
 
-using FmqRequestDescriptor = hardware::MQDescriptorSync<hal::FmqRequestDatum>;
-using FmqResultDescriptor = hardware::MQDescriptorSync<hal::FmqResultDatum>;
+using FmqRequestDescriptor =
+        hardware::MQDescriptorSync<hardware::neuralnetworks::V1_2::FmqRequestDatum>;
+using FmqResultDescriptor =
+        hardware::MQDescriptorSync<hardware::neuralnetworks::V1_2::FmqResultDatum>;
 
 /**
  * Function to serialize results.
@@ -46,9 +51,10 @@
  * @param timing Timing information of the execution.
  * @return Serialized FMQ result data.
  */
-std::vector<hal::FmqResultDatum> serialize(hal::V1_0::ErrorStatus errorStatus,
-                                           const std::vector<hal::OutputShape>& outputShapes,
-                                           hal::Timing timing);
+std::vector<hardware::neuralnetworks::V1_2::FmqResultDatum> serialize(
+        hardware::neuralnetworks::V1_0::ErrorStatus errorStatus,
+        const std::vector<hardware::neuralnetworks::V1_2::OutputShape>& outputShapes,
+        hardware::neuralnetworks::V1_2::Timing timing);
 
 /**
  * Deserialize the FMQ request data.
@@ -60,8 +66,9 @@
  * @param data Serialized FMQ request data.
  * @return Request object if successfully deserialized, std::nullopt otherwise.
  */
-std::optional<std::tuple<hal::V1_0::Request, std::vector<int32_t>, hal::MeasureTiming>> deserialize(
-        const std::vector<hal::FmqRequestDatum>& data);
+std::optional<std::tuple<hardware::neuralnetworks::V1_0::Request, std::vector<int32_t>,
+                         hardware::neuralnetworks::V1_2::MeasureTiming>>
+deserialize(const std::vector<hardware::neuralnetworks::V1_2::FmqRequestDatum>& data);
 
 /**
  * RequestChannelReceiver is responsible for waiting on the channel until the
@@ -74,7 +81,8 @@
  */
 class RequestChannelReceiver {
     using FmqRequestChannel =
-            hardware::MessageQueue<hal::FmqRequestDatum, hardware::kSynchronizedReadWrite>;
+            hardware::MessageQueue<hardware::neuralnetworks::V1_2::FmqRequestDatum,
+                                   hardware::kSynchronizedReadWrite>;
 
    public:
     /**
@@ -103,7 +111,8 @@
      * @return Request object if successfully received, std::nullopt if error or
      *     if the receiver object was invalidated.
      */
-    std::optional<std::tuple<hal::V1_0::Request, std::vector<int32_t>, hal::MeasureTiming>>
+    std::optional<std::tuple<hardware::neuralnetworks::V1_0::Request, std::vector<int32_t>,
+                             hardware::neuralnetworks::V1_2::MeasureTiming>>
     getBlocking();
 
     /**
@@ -116,7 +125,7 @@
                            std::chrono::microseconds pollingTimeWindow);
 
    private:
-    std::optional<std::vector<hal::FmqRequestDatum>> getPacketBlocking();
+    std::optional<std::vector<hardware::neuralnetworks::V1_2::FmqRequestDatum>> getPacketBlocking();
 
     const std::unique_ptr<FmqRequestChannel> mFmqRequestChannel;
     std::atomic<bool> mTeardown{false};
@@ -129,8 +138,8 @@
  * available.
  */
 class ResultChannelSender {
-    using FmqResultChannel =
-            hardware::MessageQueue<hal::FmqResultDatum, hardware::kSynchronizedReadWrite>;
+    using FmqResultChannel = hardware::MessageQueue<hardware::neuralnetworks::V1_2::FmqResultDatum,
+                                                    hardware::kSynchronizedReadWrite>;
 
    public:
     /**
@@ -151,11 +160,12 @@
      * @param timing Timing information of the execution.
      * @return 'true' on successful send, 'false' otherwise.
      */
-    bool send(hal::V1_0::ErrorStatus errorStatus, const std::vector<hal::OutputShape>& outputShapes,
-              hal::Timing timing);
+    bool send(hardware::neuralnetworks::V1_0::ErrorStatus errorStatus,
+              const std::vector<hardware::neuralnetworks::V1_2::OutputShape>& outputShapes,
+              hardware::neuralnetworks::V1_2::Timing timing);
 
     // prefer calling ResultChannelSender::send
-    bool sendPacket(const std::vector<hal::FmqResultDatum>& packet);
+    bool sendPacket(const std::vector<hardware::neuralnetworks::V1_2::FmqResultDatum>& packet);
 
     ResultChannelSender(std::unique_ptr<FmqResultChannel> fmqResultChannel);
 
@@ -168,7 +178,7 @@
  * deserializing a request object from a FMQ, performing the inference, and
  * serializing the result back across another FMQ.
  */
-class ExecutionBurstServer : public hal::IBurstContext {
+class ExecutionBurstServer : public hardware::neuralnetworks::V1_2::IBurstContext {
     DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutionBurstServer);
 
    public:
@@ -208,7 +218,7 @@
          * @param memory Memory resource to be cached.
          * @param slot Slot identifier corresponding to the memory resource.
          */
-        virtual void addCacheEntry(const hal::hidl_memory& memory, int32_t slot) = 0;
+        virtual void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) = 0;
 
         /**
          * Removes an entry specified by a slot from the cache.
@@ -233,9 +243,12 @@
          * @return Result of the execution, including the status of the
          *     execution, dynamic output shapes, and any timing information.
          */
-        virtual std::tuple<hal::V1_0::ErrorStatus, hal::hidl_vec<hal::OutputShape>, hal::Timing>
-        execute(const hal::V1_0::Request& request, const std::vector<int32_t>& slots,
-                hal::MeasureTiming measure) = 0;
+        virtual std::tuple<hardware::neuralnetworks::V1_0::ErrorStatus,
+                           hardware::hidl_vec<hardware::neuralnetworks::V1_2::OutputShape>,
+                           hardware::neuralnetworks::V1_2::Timing>
+        execute(const hardware::neuralnetworks::V1_0::Request& request,
+                const std::vector<int32_t>& slots,
+                hardware::neuralnetworks::V1_2::MeasureTiming measure) = 0;
     };
 
     /**
@@ -261,8 +274,8 @@
      * @result IBurstContext Handle to the burst context.
      */
     static sp<ExecutionBurstServer> create(
-            const sp<hal::IBurstCallback>& callback, const FmqRequestDescriptor& requestChannel,
-            const FmqResultDescriptor& resultChannel,
+            const sp<hardware::neuralnetworks::V1_2::IBurstCallback>& callback,
+            const FmqRequestDescriptor& requestChannel, const FmqResultDescriptor& resultChannel,
             std::shared_ptr<IBurstExecutorWithCache> executorWithCache,
             std::chrono::microseconds pollingTimeWindow = std::chrono::microseconds{0});
 
@@ -290,18 +303,19 @@
      * @result IBurstContext Handle to the burst context.
      */
     static sp<ExecutionBurstServer> create(
-            const sp<hal::IBurstCallback>& callback, const FmqRequestDescriptor& requestChannel,
-            const FmqResultDescriptor& resultChannel, hal::V1_2::IPreparedModel* preparedModel,
+            const sp<hardware::neuralnetworks::V1_2::IBurstCallback>& callback,
+            const FmqRequestDescriptor& requestChannel, const FmqResultDescriptor& resultChannel,
+            hardware::neuralnetworks::V1_2::IPreparedModel* preparedModel,
             std::chrono::microseconds pollingTimeWindow = std::chrono::microseconds{0});
 
-    ExecutionBurstServer(const sp<hal::IBurstCallback>& callback,
+    ExecutionBurstServer(const sp<hardware::neuralnetworks::V1_2::IBurstCallback>& callback,
                          std::unique_ptr<RequestChannelReceiver> requestChannel,
                          std::unique_ptr<ResultChannelSender> resultChannel,
                          std::shared_ptr<IBurstExecutorWithCache> cachedExecutor);
     ~ExecutionBurstServer();
 
     // Used by the NN runtime to preemptively remove any stored memory.
-    hal::Return<void> freeMemory(int32_t slot) override;
+    hardware::Return<void> freeMemory(int32_t slot) override;
 
    private:
     // Ensures all cache entries contained in mExecutorWithCache are present in
@@ -318,7 +332,7 @@
     std::thread mWorker;
     std::mutex mMutex;
     std::atomic<bool> mTeardown{false};
-    const sp<hal::IBurstCallback> mCallback;
+    const sp<hardware::neuralnetworks::V1_2::IBurstCallback> mCallback;
     const std::unique_ptr<RequestChannelReceiver> mRequestChannelReceiver;
     const std::unique_ptr<ResultChannelSender> mResultChannelSender;
     const std::shared_ptr<IBurstExecutorWithCache> mExecutorWithCache;
diff --git a/nn/common/include/HalInterfaces.h b/nn/common/include/HalInterfaces.h
index db6149c..fe1ff56 100644
--- a/nn/common/include/HalInterfaces.h
+++ b/nn/common/include/HalInterfaces.h
@@ -69,10 +69,6 @@
 using V1_2::Constant;
 using V1_2::DeviceType;
 using V1_2::Extension;
-using V1_2::FmqRequestDatum;
-using V1_2::FmqResultDatum;
-using V1_2::IBurstCallback;
-using V1_2::IBurstContext;
 using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::SymmPerChannelQuantParams;