Add QoS types to NNAPI runtime

Bug: 136739795
Bug: 142902514
Bug: 145300530
Test: mma
Change-Id: If3ce646d512b02daa78479aa7c75de99e20def21
diff --git a/nn/common/ExecutionBurstController.cpp b/nn/common/ExecutionBurstController.cpp
index 1136024..eb956de 100644
--- a/nn/common/ExecutionBurstController.cpp
+++ b/nn/common/ExecutionBurstController.cpp
@@ -144,7 +144,7 @@
 }
 
 // deserialize a packet into the result
-std::optional<std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>> deserialize(
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> deserialize(
         const std::vector<FmqResultDatum>& data) {
     using discriminator = FmqResultDatum::hidl_discriminator;
 
@@ -161,7 +161,7 @@
     const FmqResultDatum::PacketInformation& packetInfo = data[index].packetInformation();
     index++;
     const uint32_t packetSize = packetInfo.packetSize;
-    const ErrorStatus errorStatus = packetInfo.errorStatus;
+    const V1_0::ErrorStatus errorStatus = packetInfo.errorStatus;
     const uint32_t numberOfOperands = packetInfo.numberOfOperands;
 
     // verify packet size
@@ -245,7 +245,7 @@
                                              std::chrono::microseconds pollingTimeWindow)
     : mFmqResultChannel(std::move(fmqResultChannel)), kPollingTimeWindow(pollingTimeWindow) {}
 
-std::optional<std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>>
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>>
 ResultChannelReceiver::getBlocking() {
     const auto packet = getPacketBlocking();
     if (!packet) {
@@ -266,7 +266,7 @@
     // TODO: look for a different/better way to signal/notify the futex to
     // wake up any thread waiting on it
     FmqResultDatum datum;
-    datum.packetInformation({/*.packetSize=*/0, /*.errorStatus=*/ErrorStatus::GENERAL_FAILURE,
+    datum.packetInformation({/*.packetSize=*/0, /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE,
                              /*.numberOfOperands=*/0});
     mFmqResultChannel->writeBlocking(&datum, 1);
 }
@@ -395,12 +395,12 @@
     // ensure all memories are valid
     if (!std::all_of(memories.begin(), memories.end(),
                      [](const hidl_memory& memory) { return memory.valid(); })) {
-        cb(ErrorStatus::INVALID_ARGUMENT, {});
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
 
     // return successful
-    cb(ErrorStatus::NONE, std::move(memories));
+    cb(V1_0::ErrorStatus::NONE, std::move(memories));
     return Void();
 }
 
@@ -494,11 +494,12 @@
     }
 
     // configure burst
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     sp<IBurstContext> burstContext;
     const Return<void> ret = preparedModel->configureExecutionBurst(
             callback, *requestChannelDescriptor, *resultChannelDescriptor,
-            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+            [&errorStatus, &burstContext](V1_0::ErrorStatus status,
+                                          const sp<IBurstContext>& context) {
                 errorStatus = status;
                 burstContext = context;
             });
@@ -509,7 +510,7 @@
                    << ret.description();
         return nullptr;
     }
-    if (errorStatus != ErrorStatus::NONE) {
+    if (errorStatus != V1_0::ErrorStatus::NONE) {
         LOG(ERROR) << "IPreparedModel::configureExecutionBurst failed with status "
                    << toString(errorStatus);
         return nullptr;
@@ -565,9 +566,10 @@
 }
 
 static std::tuple<int, std::vector<OutputShape>, Timing, bool> getExecutionResult(
-        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing, bool fallback) {
+        V1_0::ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing,
+        bool fallback) {
     auto [n, checkedOutputShapes, checkedTiming] =
-            getExecutionResult(status, std::move(outputShapes), timing);
+            getExecutionResult(convertToV1_3(status), std::move(outputShapes), timing);
     return {n, std::move(checkedOutputShapes), checkedTiming, fallback};
 }
 
@@ -589,7 +591,8 @@
     if (!success) {
         LOG(ERROR) << "Error sending FMQ packet";
         // only use fallback execution path if the packet could not be sent
-        return getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming, /*fallback=*/true);
+        return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+                                  /*fallback=*/true);
     }
 
     // get result packet
@@ -597,7 +600,8 @@
     if (!result) {
         LOG(ERROR) << "Error retrieving FMQ packet";
         // only use fallback execution path if the packet could not be sent
-        return getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming, /*fallback=*/false);
+        return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+                                  /*fallback=*/false);
     }
 
     // unpack results and return (only use fallback execution path if the
diff --git a/nn/common/ExecutionBurstServer.cpp b/nn/common/ExecutionBurstServer.cpp
index 890b653..7cd130f 100644
--- a/nn/common/ExecutionBurstServer.cpp
+++ b/nn/common/ExecutionBurstServer.cpp
@@ -62,7 +62,7 @@
 
     void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); }
 
-    std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+    std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
             const V1_0::Request& request, const std::vector<int32_t>& slots,
             MeasureTiming measure) override {
         // convert slots to pools
@@ -75,11 +75,11 @@
         fullRequest.pools = std::move(pools);
 
         // setup execution
-        ErrorStatus returnedStatus = ErrorStatus::GENERAL_FAILURE;
+        V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
         hidl_vec<OutputShape> returnedOutputShapes;
         Timing returnedTiming;
         auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](
-                          ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                          V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
                           const Timing& timing) {
             returnedStatus = status;
             returnedOutputShapes = outputShapes;
@@ -88,7 +88,7 @@
 
         // execute
         const Return<void> ret = mpPreparedModel->executeSynchronously(fullRequest, measure, cb);
-        if (!ret.isOk() || returnedStatus != ErrorStatus::NONE) {
+        if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) {
             LOG(ERROR) << "IPreparedModelAdapter::execute -- Error executing";
             return {returnedStatus, {}, kNoTiming};
         }
@@ -104,7 +104,7 @@
 }  // anonymous namespace
 
 // serialize result
-std::vector<FmqResultDatum> serialize(ErrorStatus errorStatus,
+std::vector<FmqResultDatum> serialize(V1_0::ErrorStatus errorStatus,
                                       const std::vector<OutputShape>& outputShapes, Timing timing) {
     // count how many elements need to be sent for a request
     size_t count = 2 + outputShapes.size();
@@ -458,7 +458,7 @@
 ResultChannelSender::ResultChannelSender(std::unique_ptr<FmqResultChannel> fmqResultChannel)
     : mFmqResultChannel(std::move(fmqResultChannel)) {}
 
-bool ResultChannelSender::send(ErrorStatus errorStatus,
+bool ResultChannelSender::send(V1_0::ErrorStatus errorStatus,
                                const std::vector<OutputShape>& outputShapes, Timing timing) {
     const std::vector<FmqResultDatum> serialized = serialize(errorStatus, outputShapes, timing);
     return sendPacket(serialized);
@@ -469,7 +469,7 @@
         LOG(ERROR)
                 << "ResultChannelSender::sendPacket -- packet size exceeds size available in FMQ";
         const std::vector<FmqResultDatum> errorPacket =
-                serialize(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+                serialize(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
 
         // Always send the packet with "blocking" because this signals the futex
         // and unblocks the consumer if it is waiting on the futex.
@@ -575,9 +575,9 @@
         return;
     }
 
-    ErrorStatus errorStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus errorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     std::vector<hidl_memory> returnedMemories;
-    auto cb = [&errorStatus, &returnedMemories](ErrorStatus status,
+    auto cb = [&errorStatus, &returnedMemories](V1_0::ErrorStatus status,
                                                 const hidl_vec<hidl_memory>& memories) {
         errorStatus = status;
         returnedMemories = memories;
@@ -585,7 +585,7 @@
 
     const Return<void> ret = mCallback->getMemories(unknownSlots, cb);
 
-    if (!ret.isOk() || errorStatus != ErrorStatus::NONE ||
+    if (!ret.isOk() || errorStatus != V1_0::ErrorStatus::NONE ||
         returnedMemories.size() != unknownSlots.size()) {
         LOG(ERROR) << "Error retrieving memories";
         return;
@@ -610,7 +610,7 @@
         // "task" function can end
         if (!arguments) {
             if (!mTeardown) {
-                mResultChannelSender->send(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+                mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
             }
             continue;
         }
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 195b804..f753a16 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -1658,40 +1658,51 @@
         case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
             return ErrorStatus::DEVICE_UNAVAILABLE;
 
-        default:
-            LOG(ERROR) << "Unknown result code " << resultCode
-                       << " mapped to ErrorStatus::GENERAL_FAILURE";
-            return ErrorStatus::GENERAL_FAILURE;
         case ANEURALNETWORKS_BAD_STATE:
         case ANEURALNETWORKS_INCOMPLETE:
         case ANEURALNETWORKS_OP_FAILED:
         case ANEURALNETWORKS_OUT_OF_MEMORY:
         case ANEURALNETWORKS_UNMAPPABLE:
+        case ANEURALNETWORKS_DEAD_OBJECT:
             return ErrorStatus::GENERAL_FAILURE;
+
+        case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
+            return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
+        case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
+            return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
+        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
+            return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
+        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
+            return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
     }
+    LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE";
+    return ErrorStatus::GENERAL_FAILURE;
 }
 
 int convertErrorStatusToResultCode(ErrorStatus status) {
     switch (status) {
         case ErrorStatus::NONE:
             return ANEURALNETWORKS_NO_ERROR;
-
-        case ErrorStatus::INVALID_ARGUMENT:
-            return ANEURALNETWORKS_BAD_DATA;
-
-        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
-            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
-
         case ErrorStatus::DEVICE_UNAVAILABLE:
             return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
-
-        default:
-            LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
-                       << " mapped to ANEURALNETWORKS_OP_FAILED";
-            return ANEURALNETWORKS_OP_FAILED;
         case ErrorStatus::GENERAL_FAILURE:
             return ANEURALNETWORKS_OP_FAILED;
+        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
+        case ErrorStatus::INVALID_ARGUMENT:
+            return ANEURALNETWORKS_BAD_DATA;
+        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
+        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
     }
+    LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
+               << " mapped to ANEURALNETWORKS_OP_FAILED";
+    return ANEURALNETWORKS_OP_FAILED;
 }
 
 std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
@@ -1950,6 +1961,43 @@
     return true;
 }
 
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
+    return status;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
+    switch (status) {
+        case V1_3::ErrorStatus::NONE:
+            return V1_0::ErrorStatus::NONE;
+        case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
+        case V1_3::ErrorStatus::GENERAL_FAILURE:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        case V1_3::ErrorStatus::INVALID_ARGUMENT:
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+    LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
+    return V1_0::ErrorStatus::GENERAL_FAILURE;
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
+    return static_cast<V1_3::ErrorStatus>(status);
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
+    return status;
+}
+
 static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
     return static_cast<V1_0::OperationType>(type);
 }
diff --git a/nn/common/include/ExecutionBurstController.h b/nn/common/include/ExecutionBurstController.h
index 15db0fc..e8f3657 100644
--- a/nn/common/include/ExecutionBurstController.h
+++ b/nn/common/include/ExecutionBurstController.h
@@ -64,8 +64,8 @@
  * @param data Serialized FMQ result data.
  * @return Result object if successfully deserialized, std::nullopt otherwise.
  */
-std::optional<std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>> deserialize(
-        const std::vector<hal::FmqResultDatum>& data);
+std::optional<std::tuple<hal::V1_0::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
+deserialize(const std::vector<hal::FmqResultDatum>& data);
 
 /**
  * ResultChannelReceiver is responsible for waiting on the channel until the
@@ -108,7 +108,7 @@
      * @return Result object if successfully received, std::nullopt if error or
      *     if the receiver object was invalidated.
      */
-    std::optional<std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
+    std::optional<std::tuple<hal::V1_0::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
     getBlocking();
 
     /**
diff --git a/nn/common/include/ExecutionBurstServer.h b/nn/common/include/ExecutionBurstServer.h
index 5bac095..2a0dfba 100644
--- a/nn/common/include/ExecutionBurstServer.h
+++ b/nn/common/include/ExecutionBurstServer.h
@@ -46,7 +46,7 @@
  * @param timing Timing information of the execution.
  * @return Serialized FMQ result data.
  */
-std::vector<hal::FmqResultDatum> serialize(hal::ErrorStatus errorStatus,
+std::vector<hal::FmqResultDatum> serialize(hal::V1_0::ErrorStatus errorStatus,
                                            const std::vector<hal::OutputShape>& outputShapes,
                                            hal::Timing timing);
 
@@ -151,7 +151,7 @@
      * @param timing Timing information of the execution.
      * @return 'true' on successful send, 'false' otherwise.
      */
-    bool send(hal::ErrorStatus errorStatus, const std::vector<hal::OutputShape>& outputShapes,
+    bool send(hal::V1_0::ErrorStatus errorStatus, const std::vector<hal::OutputShape>& outputShapes,
               hal::Timing timing);
 
     // prefer calling ResultChannelSender::send
@@ -233,8 +233,8 @@
          * @return Result of the execution, including the status of the
          *     execution, dynamic output shapes, and any timing information.
          */
-        virtual std::tuple<hal::ErrorStatus, hal::hidl_vec<hal::OutputShape>, hal::Timing> execute(
-                const hal::V1_0::Request& request, const std::vector<int32_t>& slots,
+        virtual std::tuple<hal::V1_0::ErrorStatus, hal::hidl_vec<hal::OutputShape>, hal::Timing>
+        execute(const hal::V1_0::Request& request, const std::vector<int32_t>& slots,
                 hal::MeasureTiming measure) = 0;
     };
 
diff --git a/nn/common/include/HalInterfaces.h b/nn/common/include/HalInterfaces.h
index 8efbf21..fc18e2b 100644
--- a/nn/common/include/HalInterfaces.h
+++ b/nn/common/include/HalInterfaces.h
@@ -30,6 +30,7 @@
 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
@@ -60,7 +61,6 @@
 
 using V1_0::DataLocation;
 using V1_0::DeviceStatus;
-using V1_0::ErrorStatus;
 using V1_0::FusedActivationFunc;
 using V1_0::PerformanceInfo;
 using V1_0::RequestArgument;
@@ -72,7 +72,6 @@
 using V1_2::FmqResultDatum;
 using V1_2::IBurstCallback;
 using V1_2::IBurstContext;
-using V1_2::IExecutionCallback;
 using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::SymmPerChannelQuantParams;
@@ -80,8 +79,10 @@
 using V1_3::BufferDesc;
 using V1_3::BufferRole;
 using V1_3::Capabilities;
+using V1_3::ErrorStatus;
 using V1_3::IBuffer;
 using V1_3::IDevice;
+using V1_3::IExecutionCallback;
 using V1_3::IPreparedModel;
 using V1_3::IPreparedModelCallback;
 using V1_3::Model;
@@ -92,6 +93,8 @@
 using V1_3::Operation;
 using V1_3::OperationType;
 using V1_3::OperationTypeRange;
+using V1_3::OptionalTimePoint;
+using V1_3::Priority;
 using V1_3::Request;
 using V1_3::Subgraph;
 using ExtensionNameAndPrefix = V1_2::Model::ExtensionNameAndPrefix;
@@ -101,6 +104,8 @@
         hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using ModelFactory = std::function<Model()>;
 
+inline constexpr Priority kDefaultPriority = Priority::MEDIUM;
+
 }  // namespace android::nn::hal
 
 #endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_INTERFACES_H
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index ada19fc..2d341ef 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -22,6 +22,7 @@
 #include <set>
 #include <string>
 #include <tuple>
+#include <utility>
 #include <vector>
 
 #include "HalInterfaces.h"
@@ -385,6 +386,11 @@
 bool compliantWithV1_2(const hal::V1_3::Model& model,
                        std::set<uint32_t>* noncompliantOperations = nullptr);
 
+hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status);
+hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status);
+hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status);
+hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status);
+
 hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
 hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
 hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
@@ -459,6 +465,19 @@
 hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime);
 hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime);
 
+constexpr hal::Priority convertToHalPriority(int32_t priority) {
+    switch (priority) {
+        case ANEURALNETWORKS_PRIORITY_LOW:
+            return hal::Priority::LOW;
+        case ANEURALNETWORKS_PRIORITY_MEDIUM:
+            return hal::Priority::MEDIUM;
+        case ANEURALNETWORKS_PRIORITY_HIGH:
+            return hal::Priority::HIGH;
+    }
+    LOG(FATAL) << "unrecognized priority: " << priority;
+    return {};
+}
+
 #ifdef NN_DEBUGGABLE
 uint32_t getProp(const char* str, uint32_t defaultValue = 0);
 #endif  // NN_DEBUGGABLE
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index 443dfa9..132b457 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -66,7 +66,7 @@
                  "SampleDriver::getCapabilities");
     return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
         // TODO(dgross): Do we need to check compliantWithV1_0(capabilities)?
-        cb(error, convertToV1_0(capabilities));
+        cb(convertToV1_0(error), convertToV1_0(capabilities));
     });
 }
 
@@ -75,7 +75,7 @@
                  "SampleDriver::getCapabilities_1_1");
     return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
         // TODO(dgross): Do we need to check compliantWithV1_1(capabilities)?
-        cb(error, convertToV1_1(capabilities));
+        cb(convertToV1_0(error), convertToV1_1(capabilities));
     });
 }
 
@@ -84,27 +84,27 @@
                  "SampleDriver::getCapabilities_1_2");
     return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
         // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)?
-        cb(error, convertToV1_2(capabilities));
+        cb(convertToV1_0(error), convertToV1_2(capabilities));
     });
 }
 
 Return<void> SampleDriver::getVersionString(getVersionString_cb cb) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
                  "SampleDriver::getVersionString");
-    cb(ErrorStatus::NONE, "JUST_AN_EXAMPLE");
+    cb(V1_0::ErrorStatus::NONE, "JUST_AN_EXAMPLE");
     return Void();
 }
 
 Return<void> SampleDriver::getType(getType_cb cb) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getType");
-    cb(ErrorStatus::NONE, V1_2::DeviceType::CPU);
+    cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU);
     return Void();
 }
 
 Return<void> SampleDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
                  "SampleDriver::getSupportedExtensions");
-    cb(ErrorStatus::NONE, {/* No extensions. */});
+    cb(V1_0::ErrorStatus::NONE, {/* No extensions. */});
     return Void();
 }
 
@@ -114,10 +114,13 @@
                  "SampleDriver::getSupportedOperations");
     if (!validateModel(model)) {
         VLOG(DRIVER) << "getSupportedOperations";
-        cb(ErrorStatus::INVALID_ARGUMENT, {});
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
-    return getSupportedOperations_1_3(convertToV1_3(model), cb);
+    return getSupportedOperations_1_3(convertToV1_3(model),
+                                      [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+                                          cb(convertToV1_0(status), supported);
+                                      });
 }
 
 Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model,
@@ -126,10 +129,13 @@
                  "SampleDriver::getSupportedOperations_1_1");
     if (!validateModel(model)) {
         VLOG(DRIVER) << "getSupportedOperations_1_1";
-        cb(ErrorStatus::INVALID_ARGUMENT, {});
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
-    return getSupportedOperations_1_3(convertToV1_3(model), cb);
+    return getSupportedOperations_1_3(convertToV1_3(model),
+                                      [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+                                          cb(convertToV1_0(status), supported);
+                                      });
 }
 
 Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model,
@@ -138,60 +144,79 @@
                  "SampleDriver::getSupportedOperations_1_2");
     if (!validateModel(model)) {
         VLOG(DRIVER) << "getSupportedOperations_1_2";
-        cb(ErrorStatus::INVALID_ARGUMENT, {});
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
-    return getSupportedOperations_1_3(convertToV1_3(model), cb);
+    return getSupportedOperations_1_3(convertToV1_3(model),
+                                      [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+                                          cb(convertToV1_0(status), supported);
+                                      });
 }
 
 Return<void> SampleDriver::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
                  "SampleDriver::getNumberOfCacheFilesNeeded");
     // Set both numbers to be 0 for cache not supported.
-    cb(ErrorStatus::NONE, /*numModelCache=*/0, /*numDataCache=*/0);
+    cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/0, /*numDataCache=*/0);
     return Void();
 }
 
-Return<ErrorStatus> SampleDriver::prepareModel(const V1_0::Model& model,
-                                               const sp<V1_0::IPreparedModelCallback>& callback) {
-    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel");
-    return prepareModelBase(model, this, ExecutionPreference::FAST_SINGLE_ANSWER, callback);
+Return<void> SampleDriver::supportsDeadlines(supportsDeadlines_cb cb) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
+                 "SampleDriver::supportsDeadlines");
+    // Report false for both fields to indicate deadlines are not supported.
+    cb(/*prepareModelDeadline=*/false, /*executionDeadline=*/false);
+    return Void();
 }
 
-Return<ErrorStatus> SampleDriver::prepareModel_1_1(
+Return<V1_0::ErrorStatus> SampleDriver::prepareModel(
+        const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel");
+    const ErrorStatus status = prepareModelBase(
+            model, this, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, callback);
+    return convertToV1_0(status);
+}
+
+Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_1(
         const V1_1::Model& model, ExecutionPreference preference,
         const sp<V1_0::IPreparedModelCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_1");
-    return prepareModelBase(model, this, preference, callback);
+    const ErrorStatus status =
+            prepareModelBase(model, this, preference, kDefaultPriority, {}, callback);
+    return convertToV1_0(status);
 }
 
-Return<ErrorStatus> SampleDriver::prepareModel_1_2(
+Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_2(
         const V1_2::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
         const hidl_vec<hidl_handle>&, const CacheToken&,
         const sp<V1_2::IPreparedModelCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_2");
-    return prepareModelBase(model, this, preference, callback);
+    const ErrorStatus status =
+            prepareModelBase(model, this, preference, kDefaultPriority, {}, callback);
+    return convertToV1_0(status);
 }
 
-Return<ErrorStatus> SampleDriver::prepareModel_1_3(
-        const V1_3::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
+Return<V1_3::ErrorStatus> SampleDriver::prepareModel_1_3(
+        const V1_3::Model& model, ExecutionPreference preference, Priority priority,
+        const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&,
         const hidl_vec<hidl_handle>&, const CacheToken&,
         const sp<V1_3::IPreparedModelCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_3");
-    return prepareModelBase(model, this, preference, callback);
+    return prepareModelBase(model, this, preference, priority, deadline, callback);
 }
 
-Return<ErrorStatus> SampleDriver::prepareModelFromCache(
+Return<V1_0::ErrorStatus> SampleDriver::prepareModelFromCache(
         const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
         const sp<V1_2::IPreparedModelCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
                  "SampleDriver::prepareModelFromCache");
     notify(callback, ErrorStatus::GENERAL_FAILURE, nullptr);
-    return ErrorStatus::GENERAL_FAILURE;
+    return V1_0::ErrorStatus::GENERAL_FAILURE;
 }
 
 Return<ErrorStatus> SampleDriver::prepareModelFromCache_1_3(
-        const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+        Priority /*priority*/, const OptionalTimePoint& /*deadline*/, const hidl_vec<hidl_handle>&,
+        const hidl_vec<hidl_handle>&, const CacheToken&,
         const sp<V1_3::IPreparedModelCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
                  "SampleDriver::prepareModelFromCache_1_3");
@@ -266,10 +291,10 @@
 }
 
 template <typename T_IExecutionCallback>
-Return<ErrorStatus> executeBase(const Request& request, MeasureTiming measure, const Model& model,
-                                const SampleDriver& driver,
-                                const std::vector<RunTimePoolInfo>& poolInfos,
-                                const sp<T_IExecutionCallback>& callback) {
+ErrorStatus executeBase(const Request& request, MeasureTiming measure, const Model& model,
+                        const SampleDriver& driver, const std::vector<RunTimePoolInfo>& poolInfos,
+                        const OptionalTimePoint& /*deadline*/,
+                        const sp<T_IExecutionCallback>& callback) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SampleDriver::executeBase");
     VLOG(DRIVER) << "executeBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
 
@@ -294,27 +319,31 @@
     return ErrorStatus::NONE;
 }
 
-Return<ErrorStatus> SamplePreparedModel::execute(const V1_0::Request& request,
-                                                 const sp<V1_0::IExecutionCallback>& callback) {
-    return executeBase(convertToV1_3(request), MeasureTiming::NO, mModel, *mDriver, mPoolInfos,
-                       callback);
+Return<V1_0::ErrorStatus> SamplePreparedModel::execute(
+        const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) {
+    const ErrorStatus status = executeBase(convertToV1_3(request), MeasureTiming::NO, mModel,
+                                           *mDriver, mPoolInfos, {}, callback);
+    return convertToV1_0(status);
 }
 
-Return<ErrorStatus> SamplePreparedModel::execute_1_2(const V1_0::Request& request,
-                                                     MeasureTiming measure,
-                                                     const sp<V1_2::IExecutionCallback>& callback) {
-    return executeBase(convertToV1_3(request), measure, mModel, *mDriver, mPoolInfos, callback);
+Return<V1_0::ErrorStatus> SamplePreparedModel::execute_1_2(
+        const V1_0::Request& request, MeasureTiming measure,
+        const sp<V1_2::IExecutionCallback>& callback) {
+    const ErrorStatus status = executeBase(convertToV1_3(request), measure, mModel, *mDriver,
+                                           mPoolInfos, {}, callback);
+    return convertToV1_0(status);
 }
 
-Return<ErrorStatus> SamplePreparedModel::execute_1_3(const V1_3::Request& request,
-                                                     MeasureTiming measure,
-                                                     const sp<V1_2::IExecutionCallback>& callback) {
-    return executeBase(request, measure, mModel, *mDriver, mPoolInfos, callback);
+Return<V1_3::ErrorStatus> SamplePreparedModel::execute_1_3(
+        const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
+        const sp<V1_3::IExecutionCallback>& callback) {
+    return executeBase(request, measure, mModel, *mDriver, mPoolInfos, deadline, callback);
 }
 
 static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronouslyBase(
         const Request& request, MeasureTiming measure, const Model& model,
-        const SampleDriver& driver, const std::vector<RunTimePoolInfo>& poolInfos) {
+        const SampleDriver& driver, const std::vector<RunTimePoolInfo>& poolInfos,
+        const OptionalTimePoint& /*deadline*/) {
     NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
                  "SampleDriver::executeSynchronouslyBase");
     VLOG(DRIVER) << "executeSynchronouslyBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
@@ -355,17 +384,18 @@
 Return<void> SamplePreparedModel::executeSynchronously(const V1_0::Request& request,
                                                        MeasureTiming measure,
                                                        executeSynchronously_cb cb) {
-    auto [status, outputShapes, timing] =
-            executeSynchronouslyBase(convertToV1_3(request), measure, mModel, *mDriver, mPoolInfos);
-    cb(status, std::move(outputShapes), timing);
+    auto [status, outputShapes, timing] = executeSynchronouslyBase(
+            convertToV1_3(request), measure, mModel, *mDriver, mPoolInfos, {});
+    cb(convertToV1_0(status), std::move(outputShapes), timing);
     return Void();
 }
 
 Return<void> SamplePreparedModel::executeSynchronously_1_3(const V1_3::Request& request,
                                                            MeasureTiming measure,
+                                                           const OptionalTimePoint& deadline,
                                                            executeSynchronously_1_3_cb cb) {
     auto [status, outputShapes, timing] =
-            executeSynchronouslyBase(request, measure, mModel, *mDriver, mPoolInfos);
+            executeSynchronouslyBase(request, measure, mModel, *mDriver, mPoolInfos, deadline);
     cb(status, std::move(outputShapes), timing);
     return Void();
 }
@@ -392,7 +422,7 @@
 
     void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); }
 
-    std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+    std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
             const V1_0::Request& request, const std::vector<int32_t>& slots,
             MeasureTiming measure) override {
         NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
@@ -404,7 +434,7 @@
         // ensure all relevant pools are valid
         if (!std::all_of(slots.begin(), slots.end(),
                          [this](int32_t slot) { return isCacheEntryPresent(slot); })) {
-            return {ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
+            return {V1_0::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
         }
 
         // finish the request object (for validation)
@@ -419,7 +449,7 @@
 
         // validate request object against the model
         if (!validateRequest(fullRequest, mModel)) {
-            return {ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
+            return {V1_0::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
         }
 
         // select relevant entries from cache
@@ -434,9 +464,9 @@
         int n = executor.run(mModel, fullRequest, mModelPoolInfos, requestPoolInfos);
         if (measure == MeasureTiming::YES) deviceEnd = now();
         VLOG(DRIVER) << "executor.run returned " << n;
-        ErrorStatus executionStatus = convertResultCodeToErrorStatus(n);
+        V1_0::ErrorStatus executionStatus = convertToV1_0(convertResultCodeToErrorStatus(n));
         hidl_vec<OutputShape> outputShapes = executor.getOutputShapes();
-        if (measure == MeasureTiming::YES && executionStatus == ErrorStatus::NONE) {
+        if (measure == MeasureTiming::YES && executionStatus == V1_0::ErrorStatus::NONE) {
             driverEnd = now();
             Timing timing = {
                     .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
@@ -497,9 +527,9 @@
             callback, requestChannel, resultChannel, executorWithCache, pollingTimeWindow);
 
     if (burst == nullptr) {
-        cb(ErrorStatus::GENERAL_FAILURE, {});
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
     } else {
-        cb(ErrorStatus::NONE, burst);
+        cb(V1_0::ErrorStatus::NONE, burst);
     }
 
     return Void();
diff --git a/nn/driver/sample/SampleDriver.h b/nn/driver/sample/SampleDriver.h
index 9f0a7d4..4163113 100644
--- a/nn/driver/sample/SampleDriver.h
+++ b/nn/driver/sample/SampleDriver.h
@@ -56,27 +56,30 @@
     hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model,
                                                  getSupportedOperations_1_2_cb cb) override;
     hal::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override;
-    hal::Return<hal::ErrorStatus> prepareModel(
+    hal::Return<void> supportsDeadlines(supportsDeadlines_cb cb) override;
+    hal::Return<hal::V1_0::ErrorStatus> prepareModel(
             const hal::V1_0::Model& model,
             const sp<hal::V1_0::IPreparedModelCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> prepareModel_1_1(
+    hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_1(
             const hal::V1_1::Model& model, hal::ExecutionPreference preference,
             const sp<hal::V1_0::IPreparedModelCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> prepareModel_1_2(
+    hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_2(
             const hal::V1_2::Model& model, hal::ExecutionPreference preference,
             const hal::hidl_vec<hal::hidl_handle>& modelCache,
             const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
             const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> prepareModel_1_3(
+    hal::Return<hal::V1_3::ErrorStatus> prepareModel_1_3(
             const hal::V1_3::Model& model, hal::ExecutionPreference preference,
+            hal::Priority priority, const hal::OptionalTimePoint& deadline,
             const hal::hidl_vec<hal::hidl_handle>& modelCache,
             const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
             const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> prepareModelFromCache(
+    hal::Return<hal::V1_0::ErrorStatus> prepareModelFromCache(
             const hal::hidl_vec<hal::hidl_handle>& modelCache,
             const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
             const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> prepareModelFromCache_1_3(
+    hal::Return<hal::V1_3::ErrorStatus> prepareModelFromCache_1_3(
+            hal::Priority priority, const hal::OptionalTimePoint& deadline,
             const hal::hidl_vec<hal::hidl_handle>& modelCache,
             const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
             const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
@@ -105,20 +108,22 @@
         : mModel(model), mDriver(driver), kPreference(preference) {}
     ~SamplePreparedModel() override {}
     bool initialize();
-    hal::Return<hal::ErrorStatus> execute(
+    hal::Return<hal::V1_0::ErrorStatus> execute(
             const hal::V1_0::Request& request,
             const sp<hal::V1_0::IExecutionCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> execute_1_2(
+    hal::Return<hal::V1_0::ErrorStatus> execute_1_2(
             const hal::V1_0::Request& request, hal::MeasureTiming measure,
             const sp<hal::V1_2::IExecutionCallback>& callback) override;
-    hal::Return<hal::ErrorStatus> execute_1_3(
+    hal::Return<hal::V1_3::ErrorStatus> execute_1_3(
             const hal::V1_3::Request& request, hal::MeasureTiming measure,
-            const sp<hal::V1_2::IExecutionCallback>& callback) override;
+            const hal::OptionalTimePoint& deadline,
+            const sp<hal::V1_3::IExecutionCallback>& callback) override;
     hal::Return<void> executeSynchronously(const hal::V1_0::Request& request,
                                            hal::MeasureTiming measure,
                                            executeSynchronously_cb cb) override;
     hal::Return<void> executeSynchronously_1_3(const hal::V1_3::Request& request,
                                                hal::MeasureTiming measure,
+                                               const hal::OptionalTimePoint& deadline,
                                                executeSynchronously_1_3_cb cb) override;
     hal::Return<void> configureExecutionBurst(
             const sp<hal::V1_2::IBurstCallback>& callback,
diff --git a/nn/driver/sample/SampleDriverPartial.cpp b/nn/driver/sample/SampleDriverPartial.cpp
index 1bba375..2ba3d9a 100644
--- a/nn/driver/sample/SampleDriverPartial.cpp
+++ b/nn/driver/sample/SampleDriverPartial.cpp
@@ -49,13 +49,15 @@
 }
 
 Return<ErrorStatus> SampleDriverPartial::prepareModel_1_3(
-        const V1_3::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
+        const V1_3::Model& model, ExecutionPreference preference, Priority priority,
+        const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&,
         const hidl_vec<hidl_handle>&, const CacheToken&,
         const sp<V1_3::IPreparedModelCallback>& callback) {
     std::vector<bool> supported = getSupportedOperationsImpl(model);
     bool isModelFullySupported =
             std::all_of(supported.begin(), supported.end(), [](bool v) { return v; });
-    return prepareModelBase(model, this, preference, callback, isModelFullySupported);
+    return prepareModelBase(model, this, preference, priority, deadline, callback,
+                            isModelFullySupported);
 }
 
 }  // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriverPartial.h b/nn/driver/sample/SampleDriverPartial.h
index f6f37aa..090389a 100644
--- a/nn/driver/sample/SampleDriverPartial.h
+++ b/nn/driver/sample/SampleDriverPartial.h
@@ -40,6 +40,7 @@
                                                  getSupportedOperations_1_3_cb cb) override;
     hal::Return<hal::ErrorStatus> prepareModel_1_3(
             const hal::V1_3::Model& model, hal::ExecutionPreference preference,
+            hal::Priority priority, const hal::OptionalTimePoint& deadline,
             const hal::hidl_vec<hal::hidl_handle>& modelCache,
             const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
             const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
diff --git a/nn/driver/sample/SampleDriverUtils.cpp b/nn/driver/sample/SampleDriverUtils.cpp
index 8801e5f..7cccf92 100644
--- a/nn/driver/sample/SampleDriverUtils.cpp
+++ b/nn/driver/sample/SampleDriverUtils.cpp
@@ -27,7 +27,7 @@
 
 void notify(const sp<V1_0::IPreparedModelCallback>& callback, const ErrorStatus& status,
             const sp<SamplePreparedModel>& preparedModel) {
-    const auto ret = callback->notify(status, preparedModel);
+    const auto ret = callback->notify(convertToV1_0(status), preparedModel);
     if (!ret.isOk()) {
         LOG(ERROR) << "Error when calling IPreparedModelCallback::notify: " << ret.description();
     }
@@ -35,7 +35,7 @@
 
 void notify(const sp<V1_2::IPreparedModelCallback>& callback, const ErrorStatus& status,
             const sp<SamplePreparedModel>& preparedModel) {
-    const auto ret = callback->notify_1_2(status, preparedModel);
+    const auto ret = callback->notify_1_2(convertToV1_0(status), preparedModel);
     if (!ret.isOk()) {
         LOG(ERROR) << "Error when calling IPreparedModelCallback::notify_1_2: "
                    << ret.description();
@@ -53,7 +53,7 @@
 
 void notify(const sp<V1_0::IExecutionCallback>& callback, const ErrorStatus& status,
             const hidl_vec<OutputShape>&, Timing) {
-    const auto ret = callback->notify(status);
+    const auto ret = callback->notify(convertToV1_0(status));
     if (!ret.isOk()) {
         LOG(ERROR) << "Error when calling IExecutionCallback::notify: " << ret.description();
     }
@@ -61,12 +61,20 @@
 
 void notify(const sp<V1_2::IExecutionCallback>& callback, const ErrorStatus& status,
             const hidl_vec<OutputShape>& outputShapes, Timing timing) {
-    const auto ret = callback->notify_1_2(status, outputShapes, timing);
+    const auto ret = callback->notify_1_2(convertToV1_0(status), outputShapes, timing);
     if (!ret.isOk()) {
         LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_2: " << ret.description();
     }
 }
 
+void notify(const sp<V1_3::IExecutionCallback>& callback, const ErrorStatus& status,
+            const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+    const auto ret = callback->notify_1_3(status, outputShapes, timing);
+    if (!ret.isOk()) {
+        LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_3: " << ret.description();
+    }
+}
+
 }  // namespace sample_driver
 }  // namespace nn
 }  // namespace android
diff --git a/nn/driver/sample/SampleDriverUtils.h b/nn/driver/sample/SampleDriverUtils.h
index ad6ddb2..b40b040 100644
--- a/nn/driver/sample/SampleDriverUtils.h
+++ b/nn/driver/sample/SampleDriverUtils.h
@@ -38,11 +38,15 @@
 void notify(const sp<hal::V1_2::IExecutionCallback>& callback, const hal::ErrorStatus& status,
             const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing);
 
+void notify(const sp<hal::V1_3::IExecutionCallback>& callback, const hal::ErrorStatus& status,
+            const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing);
+
 template <typename T_Model, typename T_IPreparedModelCallback>
-hal::Return<hal::ErrorStatus> prepareModelBase(const T_Model& model, const SampleDriver* driver,
-                                               hal::ExecutionPreference preference,
-                                               const sp<T_IPreparedModelCallback>& callback,
-                                               bool isFullModelSupported = true) {
+hal::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driver,
+                                  hal::ExecutionPreference preference, hal::Priority /*priority*/,
+                                  const hal::OptionalTimePoint& /*deadline*/,
+                                  const sp<T_IPreparedModelCallback>& callback,
+                                  bool isFullModelSupported = true) {
     if (callback.get() == nullptr) {
         LOG(ERROR) << "invalid callback passed to prepareModelBase";
         return hal::ErrorStatus::INVALID_ARGUMENT;
diff --git a/nn/runtime/Callbacks.cpp b/nn/runtime/Callbacks.cpp
index 9045484..6655a1a 100644
--- a/nn/runtime/Callbacks.cpp
+++ b/nn/runtime/Callbacks.cpp
@@ -32,8 +32,8 @@
 
 // PreparedModelCallback methods begin here
 
-Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
-                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+Return<void> PreparedModelCallback::notifyInternal(ErrorStatus errorStatus,
+                                                   const sp<V1_0::IPreparedModel>& preparedModel) {
     {
         std::lock_guard<std::mutex> hold(mMutex);
 
@@ -52,14 +52,19 @@
     return Void();
 }
 
-Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
+}
+
+Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
                                                const sp<V1_2::IPreparedModel>& preparedModel) {
-    return notify(errorStatus, preparedModel);
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
 }
 
 Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
                                                const sp<V1_3::IPreparedModel>& preparedModel) {
-    return notify(errorStatus, preparedModel);
+    return notifyInternal(errorStatus, preparedModel);
 }
 
 void PreparedModelCallback::wait() const {
@@ -79,32 +84,20 @@
 
 // ExecutionCallback methods begin here
 
-Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
-    notifyInternal(errorStatus, {}, kNoTiming);
-    return Void();
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), {}, kNoTiming);
 }
 
-Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
+Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
                                            const hidl_vec<OutputShape>& outputShapes,
                                            const Timing& timing) {
-    if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
-        // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
-        if (outputShapes.size() == 0) {
-            LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
-            notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
-            return Void();
-        }
-    } else if (errorStatus != ErrorStatus::NONE) {
-        // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
-        if (outputShapes.size() != 0) {
-            LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
-                          "neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
-            notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
-            return Void();
-        }
-    }
-    notifyInternal(errorStatus, outputShapes, timing);
-    return Void();
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), outputShapes, timing);
+}
+
+Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    return notifyInternal(errorStatus, outputShapes, timing);
 }
 
 void ExecutionCallback::wait() const {
@@ -187,15 +180,35 @@
     mOnFinish = finish;
 }
 
-void ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
-                                       const hidl_vec<OutputShape>& outputShapes,
-                                       const Timing& timing) {
+Return<void> ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
+                                               hidl_vec<OutputShape> outputShapes, Timing timing) {
+    // check results
+    if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() == 0) {
+            LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    } else if (errorStatus != ErrorStatus::NONE) {
+        // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() != 0) {
+            LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
+                          "neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    }
+
+    // store results
     {
         std::lock_guard<std::mutex> hold(mMutex);
 
         // quick-return if object has already been notified
         if (mNotified) {
-            return;
+            return Void();
         }
 
         mErrorStatus = errorStatus;
@@ -212,6 +225,7 @@
         }
     }
     mCondition.notify_all();
+    return Void();
 }
 
 }  // namespace android::nn
diff --git a/nn/runtime/Callbacks.h b/nn/runtime/Callbacks.h
index 2f0a9e5..1c484e4 100644
--- a/nn/runtime/Callbacks.h
+++ b/nn/runtime/Callbacks.h
@@ -85,7 +85,7 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    hal::Return<void> notify(hal::ErrorStatus status,
+    hal::Return<void> notify(hal::V1_0::ErrorStatus status,
                              const sp<hal::V1_0::IPreparedModel>& preparedModel) override;
 
     /**
@@ -111,7 +111,7 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    hal::Return<void> notify_1_2(hal::ErrorStatus status,
+    hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
                                  const sp<hal::V1_2::IPreparedModel>& preparedModel) override;
 
     /**
@@ -134,10 +134,12 @@
      *     - DEVICE_UNAVAILABLE if driver is offline or busy
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if the input model is invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    hal::Return<void> notify_1_3(hal::ErrorStatus status,
+    hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
                                  const sp<hal::V1_3::IPreparedModel>& preparedModel) override;
 
     /**
@@ -158,8 +160,11 @@
      *     - DEVICE_UNAVAILABLE if driver is offline or busy
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if the input model is invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+     *     - DEAD_OBJECT if the driver crashed without returning a result
      */
-    hal::ErrorStatus getStatus() const;
+    hal::V1_3::ErrorStatus getStatus() const;
 
     /**
      * Retrieves the model that has been prepared for execution from the
@@ -174,6 +179,9 @@
     sp<hal::V1_0::IPreparedModel> getPreparedModel() const;
 
    private:
+    hal::Return<void> notifyInternal(hal::ErrorStatus errorStatus,
+                                     const sp<hal::V1_0::IPreparedModel>& preparedModel);
+
     mutable std::mutex mMutex;
     mutable std::condition_variable mCondition;
     bool mNotified GUARDED_BY(mMutex) = false;
@@ -186,8 +194,8 @@
  * from a task executing asynchronously with respect to the runtime. If a
  * calling thread calls wait or get* on a ExecutionCallback object and the
  * corresponding asynchronous task has not finished the execution, the calling
- * thread will block until the asynchronous task has either called notify or
- * notify_1_2.
+ * thread will block until the asynchronous task has called one of the notify*
+ * methods.
  *
  * If the callback object is notified more than once, only the results of the
  * first call to notify* are used, and the results from subsequent calls are
@@ -206,8 +214,8 @@
      * all prior and future wait calls on the ExecutionCallback object to
      * proceed.
      *
-     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
-     * be called on a given ExecutionCallback object.
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
      *
      * If the callback object is notified more than once, only the results of
      * the first call to notify* are used, and the results from subsequent calls
@@ -223,7 +231,7 @@
      *         enough to store the resultant values
      *     - INVALID_ARGUMENT if the input request is invalid
      */
-    hal::Return<void> notify(hal::ErrorStatus status) override;
+    hal::Return<void> notify(hal::V1_0::ErrorStatus status) override;
 
     /**
      * IExecutionCallback::notify_1_2 marks the callback object with the results
@@ -231,8 +239,8 @@
      * asynchronous execution that held this callback and enables all prior and
      * future wait calls on the ExecutionCallback object to proceed.
      *
-     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
-     * be called on a given ExecutionCallback object.
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
      *
      * If the callback object is notified more than once, only the results of
      * the first call to notify* are used, and the results from subsequent calls
@@ -258,15 +266,54 @@
      *     reported as UINT64_MAX. A driver may choose to report any time as
      *     UINT64_MAX, indicating that particular measurement is not available.
      */
-    hal::Return<void> notify_1_2(hal::ErrorStatus status,
+    hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
+                                 const hal::hidl_vec<hal::OutputShape>& outputShapes,
+                                 const hal::Timing& timing) override;
+
+    /**
+     * IExecutionCallback::notify_1_3 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *         error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *         not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *         invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     *     - RESOURCE_EXHAUSTED_* if the execution was aborted by the driver
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
                                  const hal::hidl_vec<hal::OutputShape>& outputShapes,
                                  const hal::Timing& timing) override;
 
     // An overload of the latest notify interface to hide the version from ExecutionBuilder.
-    hal::Return<void> notify(hal::ErrorStatus status,
+    hal::Return<void> notify(hal::V1_3::ErrorStatus status,
                              const hal::hidl_vec<hal::OutputShape>& outputShapes,
                              const hal::Timing& timing) {
-        return notify_1_2(status, outputShapes, timing);
+        return notify_1_3(status, outputShapes, timing);
     }
 
     /**
@@ -277,9 +324,10 @@
 
     /**
      * Retrieves the error status returned from the asynchronous task launched
-     * by IPreparedModel::execute*. If IPreparedModel::execute* has not finished
-     * asynchronously executing, this call will block until the asynchronous
-     * task notifies the object.
+     * by IPreparedModel::execute* (but not by
+     * IPreparedModel::executeSynchronously*). If IPreparedModel::execute* has
+     * not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
      *
      * @return status Error status returned from launching the asynchronous task
      *     (if the launch fails) or from the asynchronous task itself (if the
@@ -292,8 +340,11 @@
      *         not large enough to store the corresponding output
      *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
      *         invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+     *     - DEAD_OBJECT if the driver crashed without returning a result
      */
-    hal::ErrorStatus getStatus() const;
+    hal::V1_3::ErrorStatus getStatus() const;
 
     /**
      * Retrieves the output shapes returned from the asynchronous task launched
@@ -385,9 +436,9 @@
      * before any call to wait or get* return. It then enables all prior and
      * future wait calls on the ExecutionCallback object to proceed.
      */
-    void notifyInternal(hal::ErrorStatus errorStatus,
-                        const hal::hidl_vec<hal::OutputShape>& outputShapes,
-                        const hal::Timing& timing);
+    hal::Return<void> notifyInternal(hal::ErrorStatus errorStatus,
+                                     hal::hidl_vec<hal::OutputShape> outputShapes,
+                                     hal::Timing timing);
 
     // members
     mutable std::mutex mMutex;
diff --git a/nn/runtime/NeuralNetworks.cpp b/nn/runtime/NeuralNetworks.cpp
index ad84844..8913c0e 100644
--- a/nn/runtime/NeuralNetworks.cpp
+++ b/nn/runtime/NeuralNetworks.cpp
@@ -527,6 +527,16 @@
 static_assert(static_cast<int32_t>(DeviceType::ACCELERATOR) == ANEURALNETWORKS_DEVICE_ACCELERATOR,
               "DeviceType::ACCELERATOR != ANEURALNETWORKS_DEVICE_ACCELERATOR");
 
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.3/types.hal.
+static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_LOW) == Priority::LOW,
+              "ANEURALNETWORKS_PRIORITY_LOW does not map to Priority::LOW");
+static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
+                      Priority::MEDIUM,
+              "ANEURALNETWORKS_PRIORITY_MEDIUM does not map to Priority::MEDIUM");
+static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_HIGH) == Priority::HIGH,
+              "ANEURALNETWORKS_PRIORITY_HIGH does not map to Priority::HIGH");
+
 // Asserts for ANeuralNetworksOperandType memory layout
 static_assert(offsetof(ANeuralNetworksOperandType, type) == 0,
               "ANeuralNetworksOperandType.type offset != 0");
@@ -563,9 +573,10 @@
               "Constant::BYTE_SIZE_OF_CACHE_TOKEN != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN");
 
 // Asserts for compilation priority
-static_assert(ANEURALNETWORKS_PRIORITY_LOW == 0, "ANEURALNETWORKS_PRIORITY_LOW has changed");
-static_assert(ANEURALNETWORKS_PRIORITY_MEDIUM == 1, "ANEURALNETWORKS_PRIORITY_MEDIUM has changed");
-static_assert(ANEURALNETWORKS_PRIORITY_HIGH == 2, "ANEURALNETWORKS_PRIORITY_HIGH has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_LOW == 90, "ANEURALNETWORKS_PRIORITY_LOW has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_MEDIUM == 100,
+              "ANEURALNETWORKS_PRIORITY_MEDIUM has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_HIGH == 110, "ANEURALNETWORKS_PRIORITY_HIGH has changed");
 static_assert(ANEURALNETWORKS_PRIORITY_DEFAULT == ANEURALNETWORKS_PRIORITY_MEDIUM,
               "ANEURALNETWORKS_PRIORITY_DEFAULT has changed");
 
diff --git a/nn/runtime/VersionedInterfaces.cpp b/nn/runtime/VersionedInterfaces.cpp
index d7c96f6..c4c6e85 100644
--- a/nn/runtime/VersionedInterfaces.cpp
+++ b/nn/runtime/VersionedInterfaces.cpp
@@ -106,7 +106,7 @@
 const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
 
 void sendFailureMessage(const sp<IPreparedModelCallback>& cb) {
-    cb->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
+    cb->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr);
 }
 
 void sendFailureMessage(const sp<PreparedModelCallback>& cb) {
@@ -114,7 +114,7 @@
 }
 
 void sendFailureMessage(const sp<IExecutionCallback>& cb) {
-    cb->notify(ErrorStatus::GENERAL_FAILURE);
+    cb->notify_1_3(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
 }
 
 // This class is thread safe
@@ -209,7 +209,7 @@
 
     // version 1.3+ HAL
     if (mPreparedModelV1_3 != nullptr) {
-        Return<ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(request, measure, callback);
+        Return<ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(request, measure, {}, callback);
         if (!ret.isOk()) {
             LOG(ERROR) << "execute_1_3 failure: " << ret.description();
             return failWithStatus(ErrorStatus::GENERAL_FAILURE);
@@ -231,14 +231,16 @@
 
     // version 1.2 HAL
     if (mPreparedModelV1_2 != nullptr) {
-        Return<ErrorStatus> ret = mPreparedModelV1_2->execute_1_2(request10, measure, callback);
+        Return<V1_0::ErrorStatus> ret =
+                mPreparedModelV1_2->execute_1_2(request10, measure, callback);
         if (!ret.isOk()) {
             LOG(ERROR) << "execute_1_2 failure: " << ret.description();
             return failWithStatus(ErrorStatus::GENERAL_FAILURE);
         }
-        if (ret != ErrorStatus::NONE) {
-            LOG(ERROR) << "execute_1_2 returned " << toString(static_cast<ErrorStatus>(ret));
-            return failWithStatus(ret);
+        const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+        if (status != V1_0::ErrorStatus::NONE) {
+            LOG(ERROR) << "execute_1_2 returned " << toString(status);
+            return failWithStatus(convertToV1_3(status));
         }
         callback->wait();
         return getResults(*callback);
@@ -246,14 +248,15 @@
 
     // version 1.0 HAL
     if (mPreparedModelV1_0 != nullptr) {
-        Return<ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
+        Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
         if (!ret.isOk()) {
             LOG(ERROR) << "execute failure: " << ret.description();
             return failWithStatus(ErrorStatus::GENERAL_FAILURE);
         }
-        if (ret != ErrorStatus::NONE) {
-            LOG(ERROR) << "execute returned " << toString(static_cast<ErrorStatus>(ret));
-            return failWithStatus(ret);
+        const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+        if (status != V1_0::ErrorStatus::NONE) {
+            LOG(ERROR) << "execute returned " << toString(status);
+            return failWithStatus(convertToV1_3(status));
         }
         callback->wait();
         return getResults(*callback);
@@ -272,7 +275,7 @@
     if (mPreparedModelV1_3 != nullptr) {
         std::tuple<int, std::vector<OutputShape>, Timing> result;
         Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3(
-                request, measure,
+                request, measure, {},
                 [&result](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                           const Timing& timing) {
                     result = getExecutionResult(error, outputShapes, timing);
@@ -296,9 +299,9 @@
         std::tuple<int, std::vector<OutputShape>, Timing> result;
         Return<void> ret = mPreparedModelV1_2->executeSynchronously(
                 request10, measure,
-                [&result](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+                [&result](V1_0::ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                           const Timing& timing) {
-                    result = getExecutionResult(error, outputShapes, timing);
+                    result = getExecutionResult(convertToV1_3(error), outputShapes, timing);
                 });
         if (!ret.isOk()) {
             LOG(ERROR) << "executeSynchronously failure: " << ret.description();
@@ -370,8 +373,8 @@
     const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
     std::pair<ErrorStatus, Capabilities> result = kFailure;
     const Return<void> ret = device->getCapabilities_1_2(
-            [&result](ErrorStatus error, const V1_2::Capabilities& capabilities) {
-                result = std::make_pair(error, convertToV1_3(capabilities));
+            [&result](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities) {
+                result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getCapabilities_1_2 failure: " << ret.description();
@@ -386,9 +389,9 @@
     const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
     std::pair<ErrorStatus, Capabilities> result = kFailure;
     const Return<void> ret = device->getCapabilities_1_1(
-            [&result](ErrorStatus error, const V1_1::Capabilities& capabilities) {
+            [&result](V1_0::ErrorStatus error, const V1_1::Capabilities& capabilities) {
                 // Time taken to convert capabilities is trivial
-                result = std::make_pair(error, convertToV1_3(capabilities));
+                result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getCapabilities_1_1 failure: " << ret.description();
@@ -403,9 +406,9 @@
     const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
     std::pair<ErrorStatus, Capabilities> result = kFailure;
     const Return<void> ret = device->getCapabilities(
-            [&result](ErrorStatus error, const V1_0::Capabilities& capabilities) {
+            [&result](V1_0::ErrorStatus error, const V1_0::Capabilities& capabilities) {
                 // Time taken to convert capabilities is trivial
-                result = std::make_pair(error, convertToV1_3(capabilities));
+                result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getCapabilities failure: " << ret.description();
@@ -421,8 +424,8 @@
     const std::pair<ErrorStatus, hidl_vec<Extension>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
     std::pair<ErrorStatus, hidl_vec<Extension>> result = kFailure;
     const Return<void> ret = device->getSupportedExtensions(
-            [&result](ErrorStatus error, const hidl_vec<Extension>& extensions) {
-                result = std::make_pair(error, extensions);
+            [&result](V1_0::ErrorStatus error, const hidl_vec<Extension>& extensions) {
+                result = std::make_pair(convertToV1_3(error), extensions);
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getSupportedExtensions failure: " << ret.description();
@@ -441,11 +444,12 @@
     CHECK(device != nullptr);
     constexpr int32_t kFailure = -1;
     int32_t result = kFailure;
-    const Return<void> ret = device->getType([&result](ErrorStatus error, DeviceType deviceType) {
-        if (error == ErrorStatus::NONE) {
-            result = static_cast<int32_t>(deviceType);
-        }
-    });
+    const Return<void> ret =
+            device->getType([&result](V1_0::ErrorStatus error, DeviceType deviceType) {
+                if (error == V1_0::ErrorStatus::NONE) {
+                    result = static_cast<int32_t>(deviceType);
+                }
+            });
     if (!ret.isOk()) {
         LOG(ERROR) << "getType failure: " << ret.description();
         return kFailure;
@@ -462,9 +466,9 @@
     CHECK(device != nullptr);
     const std::pair<ErrorStatus, hidl_string> kFailure = {ErrorStatus::GENERAL_FAILURE, ""};
     std::pair<ErrorStatus, hidl_string> result = kFailure;
-    const Return<void> ret =
-            device->getVersionString([&result](ErrorStatus error, const hidl_string& version) {
-                result = std::make_pair(error, version);
+    const Return<void> ret = device->getVersionString(
+            [&result](V1_0::ErrorStatus error, const hidl_string& version) {
+                result = std::make_pair(convertToV1_3(error), version);
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getVersion failure: " << ret.description();
@@ -485,8 +489,8 @@
                                                                       0, 0};
     std::tuple<ErrorStatus, uint32_t, uint32_t> result = kFailure;
     const Return<void> ret = device->getNumberOfCacheFilesNeeded(
-            [&result](ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
-                result = {error, numModelCache, numDataCache};
+            [&result](V1_0::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
+                result = {convertToV1_3(error), numModelCache, numDataCache};
             });
     if (!ret.isOk()) {
         LOG(ERROR) << "getNumberOfCacheFilesNeeded failure: " << ret.description();
@@ -699,13 +703,13 @@
     return {getDevice<T_IDevice>(), mDeathHandler};
 }
 
-template <typename T_IDevice, typename T_Callback>
-Return<ErrorStatus> callProtected(
-        const char* context, const std::function<Return<ErrorStatus>(const sp<T_IDevice>&)>& fn,
-        const sp<T_IDevice>& device, const sp<T_Callback>& callback,
-        const sp<IDeviceDeathHandler>& deathHandler) {
+template <typename T_Return, typename T_IDevice, typename T_Callback>
+Return<T_Return> callProtected(const char* context,
+                               const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
+                               const sp<T_IDevice>& device, const sp<T_Callback>& callback,
+                               const sp<IDeviceDeathHandler>& deathHandler) {
     const auto scoped = deathHandler->protectCallback(callback);
-    Return<ErrorStatus> ret = fn(device);
+    Return<T_Return> ret = fn(device);
     // Suppose there was a transport error.  We have the following cases:
     // 1. Either not due to a dead device, or due to a device that was
     //    already dead at the time of the call to protectCallback().  In
@@ -716,7 +720,7 @@
     // Furthermore, what if there was no transport error, but the ErrorStatus is
     // other than NONE?  We'll conservatively signal the callback anyway, just in
     // case the driver was sloppy and failed to do so.
-    if (!ret.isOk() || ret != ErrorStatus::NONE) {
+    if (!ret.isOk() || ret != T_Return::NONE) {
         // What if the deathHandler has signalled or will signal the callback?
         // This is fine -- we're permitted to signal multiple times; and we're
         // sending the same signal that the deathHandler does.
@@ -725,7 +729,7 @@
         // ignored.
 
         if (ret.isOk()) {
-            LOG(ERROR) << context << " returned " << toString(static_cast<ErrorStatus>(ret));
+            LOG(ERROR) << context << " returned " << toString(static_cast<T_Return>(ret));
         } else {
             LOG(ERROR) << context << " failure: " << ret.description();
         }
@@ -864,8 +868,9 @@
         Return<void> ret = recoverable<void, V1_2::IDevice>(
                 __FUNCTION__, [&model12, &result](const sp<V1_2::IDevice>& device) {
                     return device->getSupportedOperations_1_2(
-                            model12, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
-                                result = std::make_pair(error, supported);
+                            model12,
+                            [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
+                                result = std::make_pair(convertToV1_3(error), supported);
                             });
                 });
         if (!ret.isOk()) {
@@ -896,8 +901,9 @@
         Return<void> ret = recoverable<void, V1_1::IDevice>(
                 __FUNCTION__, [&model11, &result](const sp<V1_1::IDevice>& device) {
                     return device->getSupportedOperations_1_1(
-                            model11, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
-                                result = std::make_pair(error, supported);
+                            model11,
+                            [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
+                                result = std::make_pair(convertToV1_3(error), supported);
                             });
                 });
         if (!ret.isOk()) {
@@ -928,8 +934,9 @@
         Return<void> ret = recoverable<void, V1_0::IDevice>(
                 __FUNCTION__, [&model10, &result](const sp<V1_0::IDevice>& device) {
                     return device->getSupportedOperations(
-                            model10, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
-                                result = std::make_pair(error, supported);
+                            model10,
+                            [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
+                                result = std::make_pair(convertToV1_3(error), supported);
                             });
                 });
         if (!ret.isOk()) {
@@ -1064,8 +1071,8 @@
                 __FUNCTION__,
                 [&model, &preference, &modelCache, &dataCache, &token,
                  &callback](const sp<V1_3::IDevice>& device) {
-                    return device->prepareModel_1_3(model, preference, modelCache, dataCache, token,
-                                                    callback);
+                    return device->prepareModel_1_3(model, preference, kDefaultPriority, {},
+                                                    modelCache, dataCache, token, callback);
                 },
                 callback);
         if (!ret.isOk()) {
@@ -1095,7 +1102,7 @@
             }
         }
         if (compliant) {
-            const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_2::IDevice>(
+            const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
                     __FUNCTION__,
                     [&model12, &preference, &modelCache, &dataCache, &token,
                      &callback](const sp<V1_2::IDevice>& device) {
@@ -1107,10 +1114,10 @@
                 LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
                 return prepareModelFailure();
             }
-            if (ret != ErrorStatus::NONE) {
-                LOG(ERROR) << "prepareModel_1_2 returned "
-                           << toString(static_cast<ErrorStatus>(ret));
-                return prepareModelFailure(ret);
+            const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+            if (status != V1_0::ErrorStatus::NONE) {
+                LOG(ERROR) << "prepareModel_1_2 returned " << toString(status);
+                return prepareModelFailure(convertToV1_3(status));
             }
             return prepareModelResult(*callback, "prepareModel_1_2", kServiceName);
         }
@@ -1135,7 +1142,7 @@
             }
         }
         if (compliant) {
-            const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_1::IDevice>(
+            const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_1::IDevice>(
                     __FUNCTION__,
                     [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) {
                         return device->prepareModel_1_1(model11, preference, callback);
@@ -1145,10 +1152,10 @@
                 LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description();
                 return prepareModelFailure();
             }
-            if (ret != ErrorStatus::NONE) {
-                LOG(ERROR) << "prepareModel_1_1 returned "
-                           << toString(static_cast<ErrorStatus>(ret));
-                return prepareModelFailure(ret);
+            const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+            if (status != V1_0::ErrorStatus::NONE) {
+                LOG(ERROR) << "prepareModel_1_1 returned " << toString(status);
+                return prepareModelFailure(convertToV1_3(status));
             }
             return prepareModelResult(*callback, "prepareModel_1_1", kServiceName);
         }
@@ -1173,7 +1180,7 @@
             }
         }
         if (compliant) {
-            const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_0::IDevice>(
+            const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_0::IDevice>(
                     __FUNCTION__,
                     [&model10, &callback](const sp<V1_0::IDevice>& device) {
                         return device->prepareModel(model10, callback);
@@ -1183,9 +1190,10 @@
                 LOG(ERROR) << "prepareModel failure: " << ret.description();
                 return prepareModelFailure();
             }
-            if (ret != ErrorStatus::NONE) {
-                LOG(ERROR) << "prepareModel returned " << toString(static_cast<ErrorStatus>(ret));
-                return prepareModelFailure(ret);
+            const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+            if (status != V1_0::ErrorStatus::NONE) {
+                LOG(ERROR) << "prepareModel returned " << toString(status);
+                return prepareModelFailure(convertToV1_3(status));
             }
             return prepareModelResult(*callback, "prepareModel", kServiceName);
         }
@@ -1219,8 +1227,8 @@
         const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
                 __FUNCTION__,
                 [&modelCache, &dataCache, &token, &callback](const sp<V1_3::IDevice>& device) {
-                    return device->prepareModelFromCache_1_3(modelCache, dataCache, token,
-                                                             callback);
+                    return device->prepareModelFromCache_1_3(kDefaultPriority, {}, modelCache,
+                                                             dataCache, token, callback);
                 },
                 callback);
         if (!ret.isOk()) {
@@ -1238,7 +1246,7 @@
     // version 1.2 HAL
     if (getDevice<V1_2::IDevice>() != nullptr) {
         const sp<PreparedModelCallback> callback = new PreparedModelCallback();
-        const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_2::IDevice>(
+        const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
                 __FUNCTION__,
                 [&modelCache, &dataCache, &token, &callback](const sp<V1_2::IDevice>& device) {
                     return device->prepareModelFromCache(modelCache, dataCache, token, callback);
@@ -1248,10 +1256,10 @@
             LOG(ERROR) << "prepareModelFromCache failure: " << ret.description();
             return prepareModelFailure();
         }
-        if (ret != ErrorStatus::NONE) {
-            LOG(ERROR) << "prepareModelFromCache returned "
-                       << toString(static_cast<ErrorStatus>(ret));
-            return prepareModelFailure(ret);
+        const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
+        if (status != V1_0::ErrorStatus::NONE) {
+            LOG(ERROR) << "prepareModelFromCache returned " << toString(status);
+            return prepareModelFailure(convertToV1_3(status));
         }
         return prepareModelResult(*callback, "prepareModelFromCache", kServiceName);
     }
diff --git a/nn/runtime/test/TestCompilationCaching.cpp b/nn/runtime/test/TestCompilationCaching.cpp
index 7ec9182..f743298 100644
--- a/nn/runtime/test/TestCompilationCaching.cpp
+++ b/nn/runtime/test/TestCompilationCaching.cpp
@@ -98,33 +98,35 @@
        public:
         CachingPreparedModel() = default;
 
-        Return<ErrorStatus> execute(const V1_0::Request&,
-                                    const sp<V1_0::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+                                          const sp<V1_0::IExecutionCallback>&) override {
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
         }
-        Return<ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
-                                        const sp<V1_2::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
+                                              const sp<V1_2::IExecutionCallback>&) override {
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
         }
-        Return<ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
-                                        const sp<V1_2::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
+                                              const OptionalTimePoint&,
+                                              const sp<V1_3::IExecutionCallback>&) override {
+            return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
         }
         Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming,
                                           executeSynchronously_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
+            cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
             return Void();
         }
         Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming,
+                                              const OptionalTimePoint&,
                                               executeSynchronously_1_3_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
+            cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
             return Void();
         }
         Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
                                              const MQDescriptorSync<V1_2::FmqRequestDatum>&,
                                              const MQDescriptorSync<V1_2::FmqResultDatum>&,
                                              configureExecutionBurst_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
+            cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
             return Void();
         }
     };
@@ -153,31 +155,31 @@
                 .relaxedFloat32toFloat16PerformanceScalar = kPerf,
                 .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                 .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
-        cb(ErrorStatus::NONE, capabilities);
+        cb(V1_3::ErrorStatus::NONE, capabilities);
         return Void();
     }
 
     // Reports supporting all operations.
     Return<void> getSupportedOperations_1_3(const Model& model,
-                                            getSupportedOperations_cb cb) override {
+                                            getSupportedOperations_1_3_cb cb) override {
         std::vector<bool> supported(model.main.operations.size(), true);
-        cb(ErrorStatus::NONE, supported);
+        cb(V1_3::ErrorStatus::NONE, supported);
         return Void();
     }
 
     // Reports according to mGetNumCacheFiles.
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
-        cb(mErrorStatusGetNumCacheFiles, mNumModelCache, mNumDataCache);
+        cb(convertToV1_0(mErrorStatusGetNumCacheFiles), mNumModelCache, mNumDataCache);
         return Void();
     }
 
     // Generates CachingPreparedModel.
     // Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel.
-    Return<ErrorStatus> prepareModel_1_3(const Model&, ExecutionPreference,
-                                         const hidl_vec<hidl_handle>& modelCacheHandle,
-                                         const hidl_vec<hidl_handle>& dataCacheHandle,
-                                         const CacheToken&,
-                                         const sp<V1_3::IPreparedModelCallback>& cb) override {
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const Model&, ExecutionPreference, Priority, const OptionalTimePoint&,
+            const hidl_vec<hidl_handle>& modelCacheHandle,
+            const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+            const sp<V1_3::IPreparedModelCallback>& cb) override {
         checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size());
         if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) {
             writeToCache(modelCacheHandle, mModelCacheData);
@@ -186,25 +188,25 @@
         } else {
             mHasCalledPrepareModel = HasCalledPrepareModel::WITHOUT_CACHING;
         }
-        cb->notify_1_3(ErrorStatus::NONE, new CachingPreparedModel());
-        return ErrorStatus::NONE;
+        cb->notify_1_3(V1_3::ErrorStatus::NONE, new CachingPreparedModel());
+        return V1_3::ErrorStatus::NONE;
     }
 
     // Checks if the cache entry is correct, notifies error status according to
     // mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache.
-    Return<ErrorStatus> prepareModelFromCache_1_3(
-            const hidl_vec<hidl_handle>& modelCacheHandle,
+    Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+            Priority, const OptionalTimePoint&, const hidl_vec<hidl_handle>& modelCacheHandle,
             const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
             const sp<V1_3::IPreparedModelCallback>& callback) override {
         readFromCache(modelCacheHandle, mModelCacheData);
         readFromCache(dataCacheHandle, mDataCacheData);
         mHasCalledPrepareModelFromCache = true;
-        if (mErrorStatusPrepareFromCache == ErrorStatus::NONE) {
+        if (mErrorStatusPrepareFromCache == V1_3::ErrorStatus::NONE) {
             callback->notify_1_3(mErrorStatusPrepareFromCache, new CachingPreparedModel());
         } else {
             callback->notify_1_3(mErrorStatusPrepareFromCache, nullptr);
         }
-        return ErrorStatus::NONE;
+        return V1_3::ErrorStatus::NONE;
     };
 
     bool hasCalledPrepareModelFromCache() const { return mHasCalledPrepareModelFromCache; }
diff --git a/nn/runtime/test/TestExecution.cpp b/nn/runtime/test/TestExecution.cpp
index 945500e..fb4e9d9 100644
--- a/nn/runtime/test/TestExecution.cpp
+++ b/nn/runtime/test/TestExecution.cpp
@@ -33,6 +33,7 @@
 #include "NeuralNetworks.h"
 #include "SampleDriver.h"
 #include "TestNeuralNetworksWrapper.h"
+#include "Utils.h"
 #include "ValidateHal.h"
 
 namespace android {
@@ -51,6 +52,7 @@
 using WrapperModel = nn::test_wrapper::Model;
 using WrapperOperandType = nn::test_wrapper::OperandType;
 using WrapperType = nn::test_wrapper::Type;
+using nn::convertToV1_0;
 
 template <typename T>
 using MQDescriptorSync = hardware::MQDescriptorSync<T>;
@@ -72,44 +74,45 @@
           mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
           mErrorStatus(errorStatus) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request& request,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr.";
         if (mErrorStatus == ErrorStatus::NONE) {
             return mPreparedModelV1_0->execute(request, callback);
         } else {
-            callback->notify(mErrorStatus);
-            return ErrorStatus::NONE;
+            callback->notify(convertToV1_0(mErrorStatus));
+            return V1_0::ErrorStatus::NONE;
         }
     }
 
-    Return<ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+                                          const sp<V1_2::IExecutionCallback>& callback) override {
         CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
         if (mErrorStatus == ErrorStatus::NONE) {
             return mPreparedModelV1_2->execute_1_2(request, measure, callback);
         } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
             OutputShape shape = {.dimensions = {1}, .isSufficient = false};
-            callback->notify_1_2(mErrorStatus, {shape}, kBadTiming);
-            return ErrorStatus::NONE;
+            callback->notify_1_2(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
+            return V1_0::ErrorStatus::NONE;
         } else {
-            callback->notify_1_2(mErrorStatus, {}, kBadTiming);
-            return ErrorStatus::NONE;
+            callback->notify_1_2(convertToV1_0(mErrorStatus), {}, kBadTiming);
+            return V1_0::ErrorStatus::NONE;
         }
     }
 
-    Return<ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
+                                          const OptionalTimePoint& deadline,
+                                          const sp<V1_3::IExecutionCallback>& callback) override {
         CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
         if (mErrorStatus == ErrorStatus::NONE) {
-            return mPreparedModelV1_3->execute_1_3(request, measure, callback);
+            return mPreparedModelV1_3->execute_1_3(request, measure, deadline, callback);
         } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
             OutputShape shape = {.dimensions = {1}, .isSufficient = false};
-            callback->notify_1_2(mErrorStatus, {shape}, kBadTiming);
-            return ErrorStatus::NONE;
+            callback->notify_1_3(mErrorStatus, {shape}, kBadTiming);
+            return V1_3::ErrorStatus::NONE;
         } else {
-            callback->notify_1_2(mErrorStatus, {}, kBadTiming);
-            return ErrorStatus::NONE;
+            callback->notify_1_3(mErrorStatus, {}, kBadTiming);
+            return V1_3::ErrorStatus::NONE;
         }
     }
 
@@ -117,28 +120,23 @@
                                       executeSynchronously_cb cb) override {
         CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
         if (mErrorStatus == ErrorStatus::NONE) {
-            return mPreparedModelV1_2->executeSynchronously(
-                    request, measure,
-                    [&cb](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
-                          const Timing& timing) { cb(error, outputShapes, timing); });
+            return mPreparedModelV1_2->executeSynchronously(request, measure, cb);
         } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
             OutputShape shape = {.dimensions = {1}, .isSufficient = false};
-            cb(mErrorStatus, {shape}, kBadTiming);
+            cb(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
             return Void();
         } else {
-            cb(mErrorStatus, {}, kBadTiming);
+            cb(convertToV1_0(mErrorStatus), {}, kBadTiming);
             return Void();
         }
     }
 
     Return<void> executeSynchronously_1_3(const V1_3::Request& request, MeasureTiming measure,
+                                          const OptionalTimePoint& deadline,
                                           executeSynchronously_1_3_cb cb) override {
         CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
         if (mErrorStatus == ErrorStatus::NONE) {
-            return mPreparedModelV1_3->executeSynchronously_1_3(
-                    request, measure,
-                    [&cb](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
-                          const Timing& timing) { cb(error, outputShapes, timing); });
+            return mPreparedModelV1_3->executeSynchronously_1_3(request, measure, deadline, cb);
         } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
             OutputShape shape = {.dimensions = {1}, .isSufficient = false};
             cb(mErrorStatus, {shape}, kBadTiming);
@@ -159,7 +157,7 @@
             return mPreparedModelV1_2->configureExecutionBurst(callback, requestChannel,
                                                                resultChannel, cb);
         } else {
-            cb(mErrorStatus, nullptr);
+            cb(convertToV1_0(mErrorStatus), nullptr);
             return Void();
         }
     }
@@ -179,13 +177,13 @@
     TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
         : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request& request,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute(request, callback);
     }
 
-    Return<ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+                                          const sp<V1_2::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute_1_2(request, measure, callback);
     }
 
@@ -213,8 +211,8 @@
     TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
         : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request& request,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute(request, callback);
     }
 
@@ -242,7 +240,7 @@
                 .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                 .operandPerformance =
                         nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
-        _hidl_cb(ErrorStatus::NONE, capabilities);
+        _hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
         return Void();
     }
 
@@ -250,21 +248,21 @@
                                             getSupportedOperations_1_3_cb cb) override {
         if (nn::validateModel(model)) {
             std::vector<bool> supported(model.main.operations.size(), true);
-            cb(ErrorStatus::NONE, supported);
+            cb(V1_3::ErrorStatus::NONE, supported);
         } else {
-            cb(ErrorStatus::INVALID_ARGUMENT, {});
+            cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
         }
         return Void();
     }
 
-    Return<ErrorStatus> prepareModel_1_3(
-            const HidlModel& model, ExecutionPreference preference,
-            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
-            const CacheToken& token,
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const HidlModel& model, ExecutionPreference preference, Priority priority,
+            const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+            const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
             const sp<V1_3::IPreparedModelCallback>& actualCallback) override {
         sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
-        Return<ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
-                model, preference, modelCache, dataCache, token, localCallback);
+        Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
+                model, preference, priority, deadline, modelCache, dataCache, token, localCallback);
         if (!prepareModelReturn.isOkUnchecked()) {
             return prepareModelReturn;
         }
@@ -281,67 +279,69 @@
                     V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
         } else {
             actualCallback->notify_1_3(
-                    ErrorStatus::NONE,
+                    V1_3::ErrorStatus::NONE,
                     new TestPreparedModel13(localCallback->getPreparedModel(), mErrorStatus));
         }
         return prepareModelReturn;
     }
 
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference preference,
             const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
             const CacheToken& token,
             const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
         sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
-        Return<ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
+        Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
                 model, preference, modelCache, dataCache, token, localCallback);
         if (!prepareModelReturn.isOkUnchecked()) {
             return prepareModelReturn;
         }
-        if (prepareModelReturn != ErrorStatus::NONE) {
+        if (prepareModelReturn != V1_0::ErrorStatus::NONE) {
             actualCallback->notify_1_2(
-                    localCallback->getStatus(),
+                    convertToV1_0(localCallback->getStatus()),
                     V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
             return prepareModelReturn;
         }
         localCallback->wait();
         if (localCallback->getStatus() != ErrorStatus::NONE) {
             actualCallback->notify_1_2(
-                    localCallback->getStatus(),
+                    convertToV1_0(localCallback->getStatus()),
                     V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
         } else {
             actualCallback->notify_1_2(
-                    ErrorStatus::NONE,
+                    V1_0::ErrorStatus::NONE,
                     new TestPreparedModel12(localCallback->getPreparedModel(), mErrorStatus));
         }
         return prepareModelReturn;
     }
 
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
-        Return<ErrorStatus> prepareModelReturn =
+        Return<V1_0::ErrorStatus> prepareModelReturn =
                 SampleDriver::prepareModel_1_1(model, preference, localCallback);
         if (!prepareModelReturn.isOkUnchecked()) {
             return prepareModelReturn;
         }
-        if (prepareModelReturn != ErrorStatus::NONE) {
-            actualCallback->notify(localCallback->getStatus(), localCallback->getPreparedModel());
+        if (prepareModelReturn != V1_0::ErrorStatus::NONE) {
+            actualCallback->notify(convertToV1_0(localCallback->getStatus()),
+                                   localCallback->getPreparedModel());
             return prepareModelReturn;
         }
         localCallback->wait();
         if (localCallback->getStatus() != ErrorStatus::NONE) {
-            actualCallback->notify(localCallback->getStatus(), localCallback->getPreparedModel());
+            actualCallback->notify(convertToV1_0(localCallback->getStatus()),
+                                   localCallback->getPreparedModel());
         } else {
             actualCallback->notify(
-                    ErrorStatus::NONE,
+                    V1_0::ErrorStatus::NONE,
                     new TestPreparedModel10(localCallback->getPreparedModel(), mErrorStatus));
         }
         return prepareModelReturn;
     }
 
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
@@ -378,7 +378,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference preference,
             const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
             const CacheToken& token,
@@ -386,12 +386,12 @@
         return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
                                                actualCallback);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -407,10 +407,9 @@
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
         return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
     }
-    Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
-                                              const hidl_vec<hidl_handle>& dataCache,
-                                              const CacheToken& token,
-                                              const sp<V1_2::IPreparedModelCallback>& callback) {
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
         return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
     }
 
@@ -430,7 +429,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -443,7 +442,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -465,7 +464,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp
index fd9f17d..7931a52 100644
--- a/nn/runtime/test/TestExtensions.cpp
+++ b/nn/runtime/test/TestExtensions.cpp
@@ -45,20 +45,20 @@
     ~TestDriver() override {}
 
     Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
-        cb(ErrorStatus::NONE, {
-                                      {.name = kTestExtension1},
-                                      {.name = kTestExtension2},
-                                      {.name = kTestExtension3},
-                              });
+        cb(V1_0::ErrorStatus::NONE, {
+                                            {.name = kTestExtension1},
+                                            {.name = kTestExtension2},
+                                            {.name = kTestExtension3},
+                                    });
         return Void();
     }
 
     Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
-        cb(ErrorStatus::NONE, {/* Dummy zero-filled capabilities. */});
+        cb(V1_3::ErrorStatus::NONE, {/* Dummy zero-filled capabilities. */});
         return Void();
     }
 
-    Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_cb) override {
+    Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb) override {
         CHECK(false) << "not implemented";
         return Void();
     }
diff --git a/nn/runtime/test/TestIntrospectionControl.cpp b/nn/runtime/test/TestIntrospectionControl.cpp
index d2ea4c0..b567f02 100644
--- a/nn/runtime/test/TestIntrospectionControl.cpp
+++ b/nn/runtime/test/TestIntrospectionControl.cpp
@@ -54,6 +54,8 @@
 using WrapperModel = nn::test_wrapper::Model;
 using WrapperOperandType = nn::test_wrapper::OperandType;
 using WrapperType = nn::test_wrapper::Type;
+using nn::convertToV1_0;
+using nn::convertToV1_3;
 
 template <typename T>
 using MQDescriptorSync = hardware::MQDescriptorSync<T>;
@@ -70,14 +72,14 @@
     ~TestDriver() override {}
 
     Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
-        cb(ErrorStatus::NONE, mCapabilities);
+        cb(V1_3::ErrorStatus::NONE, mCapabilities);
         return Void();
     }
 
     Return<void> getSupportedOperations_1_3(const Model& model,
-                                            getSupportedOperations_cb cb) override {
+                                            getSupportedOperations_1_3_cb cb) override {
         if (!android::nn::validateModel(model)) {
-            cb(ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
+            cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
             return Void();
         }
         const size_t count = model.main.operations.size();
@@ -85,7 +87,7 @@
         std::transform(
                 model.main.operations.begin(), model.main.operations.end(), supported.begin(),
                 [this](Operation op) { return mSupportedOps[static_cast<int32_t>(op.type)]; });
-        cb(ErrorStatus::NONE, supported);
+        cb(V1_3::ErrorStatus::NONE, supported);
         return Void();
     }
 
@@ -313,50 +315,52 @@
         : SamplePreparedModel(model, driver, ExecutionPreference::FAST_SINGLE_ANSWER),
           mSuccess(success) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request&,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         switch (mSuccess) {
             case Success::PASS_NEITHER:
-                callback->notify(ErrorStatus::NONE);
-                return ErrorStatus::NONE;
+                callback->notify(V1_0::ErrorStatus::NONE);
+                return V1_0::ErrorStatus::NONE;
             case Success::FAIL_LAUNCH:
-                callback->notify(ErrorStatus::GENERAL_FAILURE);
-                return ErrorStatus::GENERAL_FAILURE;
+                callback->notify(V1_0::ErrorStatus::GENERAL_FAILURE);
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             case Success::FAIL_WAIT:
-                callback->notify(ErrorStatus::GENERAL_FAILURE);
-                return ErrorStatus::NONE;
+                callback->notify(V1_0::ErrorStatus::GENERAL_FAILURE);
+                return V1_0::ErrorStatus::NONE;
             default:
                 ADD_FAILURE() << "Unexpected Success kind";
-                return ErrorStatus::GENERAL_FAILURE;
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
         }
     }
 
-    Return<ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure,
+                                          const sp<V1_2::IExecutionCallback>& callback) override {
         EXPECT_EQ(measure, MeasureTiming::YES);
         switch (mSuccess) {
             case Success::PASS_NEITHER:
             case Success::PASS_DEVICE:
             case Success::PASS_DRIVER:
             case Success::PASS_BOTH:
-                callback->notify_1_2(ErrorStatus::NONE, {}, expectedTimingMap.at(mSuccess));
-                return ErrorStatus::NONE;
+                callback->notify_1_2(V1_0::ErrorStatus::NONE, {}, expectedTimingMap.at(mSuccess));
+                return V1_0::ErrorStatus::NONE;
             case Success::FAIL_LAUNCH:
-                callback->notify(ErrorStatus::GENERAL_FAILURE);
-                return ErrorStatus::GENERAL_FAILURE;
+                callback->notify(V1_0::ErrorStatus::GENERAL_FAILURE);
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             case Success::FAIL_WAIT:
-                callback->notify(ErrorStatus::GENERAL_FAILURE);
-                return ErrorStatus::NONE;
+                callback->notify(V1_0::ErrorStatus::GENERAL_FAILURE);
+                return V1_0::ErrorStatus::NONE;
             default:
                 ADD_FAILURE() << "Unexpected Success kind";
-                return ErrorStatus::GENERAL_FAILURE;
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
         }
     }
 
-    Return<ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure,
+                                          const OptionalTimePoint&,
+                                          const sp<V1_3::IExecutionCallback>& callback) override {
         // Use a dummy V1_0::Request because execute_1_2 ignores request entirely.
-        return execute_1_2(V1_0::Request{}, measure, callback);
+        const V1_0::ErrorStatus status = execute_1_2(V1_0::Request{}, measure, callback);
+        return convertToV1_3(status);
     }
 
     Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming measure,
@@ -367,7 +371,7 @@
             case Success::PASS_DEVICE:
             case Success::PASS_DRIVER:
             case Success::PASS_BOTH:
-                cb(ErrorStatus::NONE, {}, expectedTimingMap.at(mSuccess));
+                cb(V1_0::ErrorStatus::NONE, {}, expectedTimingMap.at(mSuccess));
                 return Void();
             case Success::FAIL_LAUNCH:
             case Success::FAIL_WAIT:
@@ -375,19 +379,24 @@
                 // runtime may call it even for asynchronous execution, so we
                 // need to tolerate Success::FAIL_WAIT here, not just
                 // Success::FAIL_LAUNCH.
-                cb(ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
+                cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
                 return Void();
             default:
                 ADD_FAILURE() << "Unexpected Success kind";
-                cb(ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
+                cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
                 return Void();
         }
     }
 
     Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming measure,
+                                          const OptionalTimePoint&,
                                           executeSynchronously_1_3_cb cb) override {
+        const auto wrappedCb = [&cb](V1_0::ErrorStatus status,
+                                     const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+            cb(convertToV1_3(status), outputShapes, timing);
+        };
         // Use a dummy V1_0::Request because executeSynchronously ignores request entirely.
-        return executeSynchronously(V1_0::Request{}, measure, cb);
+        return executeSynchronously(V1_0::Request{}, measure, wrappedCb);
     }
 
     // ExecutionBurstServer::create has an overload that will use
@@ -401,7 +410,7 @@
         const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(
                 callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
 
-        cb(burst == nullptr ? ErrorStatus::GENERAL_FAILURE : ErrorStatus::NONE, burst);
+        cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst);
         return Void();
     }
 
@@ -417,13 +426,13 @@
     TestPreparedModel12(const HidlModel& model, const SampleDriver* driver, Success success)
         : mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request& request,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute(request, callback);
     }
 
-    Return<ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
-                                    const sp<V1_2::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+                                          const sp<V1_2::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute_1_2(request, measure, callback);
     }
 
@@ -451,8 +460,8 @@
     TestPreparedModel10(const HidlModel& model, const SampleDriver* driver, Success success)
         : mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
 
-    Return<ErrorStatus> execute(const V1_0::Request& request,
-                                const sp<V1_0::IExecutionCallback>& callback) override {
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                      const sp<V1_0::IExecutionCallback>& callback) override {
         return mLatestPreparedModel->execute(request, callback);
     }
 
@@ -474,7 +483,7 @@
                 .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                 .operandPerformance =
                         nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
-        _hidl_cb(ErrorStatus::NONE, capabilities);
+        _hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
         return Void();
     }
 
@@ -482,9 +491,9 @@
                                             getSupportedOperations_1_3_cb cb) override {
         if (nn::validateModel(model)) {
             std::vector<bool> supported(model.main.operations.size(), true);
-            cb(ErrorStatus::NONE, supported);
+            cb(V1_3::ErrorStatus::NONE, supported);
         } else {
-            cb(ErrorStatus::INVALID_ARGUMENT, {});
+            cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
         }
         return Void();
     }
@@ -493,41 +502,42 @@
                                             getSupportedOperations_1_2_cb cb) override {
         if (nn::validateModel(model)) {
             std::vector<bool> supported(model.operations.size(), true);
-            cb(ErrorStatus::NONE, supported);
+            cb(V1_0::ErrorStatus::NONE, supported);
         } else {
             std::vector<bool> supported;
-            cb(ErrorStatus::INVALID_ARGUMENT, supported);
+            cb(V1_0::ErrorStatus::INVALID_ARGUMENT, supported);
         }
         return Void();
     }
 
-    Return<ErrorStatus> prepareModel_1_3(const HidlModel& model, ExecutionPreference,
-                                         const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
-                                         const CacheToken&,
-                                         const sp<IPreparedModelCallback>& callback) override {
-        callback->notify_1_3(ErrorStatus::NONE, new TestPreparedModel13(model, this, mSuccess));
-        return ErrorStatus::NONE;
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const HidlModel& model, ExecutionPreference, Priority, const OptionalTimePoint&,
+            const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+            const sp<V1_3::IPreparedModelCallback>& callback) override {
+        callback->notify_1_3(V1_3::ErrorStatus::NONE,
+                             new TestPreparedModel13(model, this, mSuccess));
+        return V1_3::ErrorStatus::NONE;
     }
 
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference, const hidl_vec<hidl_handle>&,
             const hidl_vec<hidl_handle>&, const CacheToken&,
             const sp<V1_2::IPreparedModelCallback>& callback) override {
-        callback->notify_1_2(ErrorStatus::NONE,
+        callback->notify_1_2(V1_0::ErrorStatus::NONE,
                              new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess));
-        return ErrorStatus::NONE;
+        return V1_0::ErrorStatus::NONE;
     }
 
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference,
             const sp<V1_0::IPreparedModelCallback>& callback) override {
-        callback->notify(ErrorStatus::NONE,
+        callback->notify(V1_0::ErrorStatus::NONE,
                          new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess));
-        return ErrorStatus::NONE;
+        return V1_0::ErrorStatus::NONE;
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const sp<V1_0::IPreparedModelCallback>& callback) override {
+    Return<V1_0::ErrorStatus> prepareModel(
+            const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override {
         return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
                                 callback);
     }
@@ -562,19 +572,19 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference preference,
             const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
             const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) override {
         return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
                                                callback);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -590,10 +600,9 @@
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
         return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
     }
-    Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
-                                              const hidl_vec<hidl_handle>& dataCache,
-                                              const CacheToken& token,
-                                              const sp<V1_2::IPreparedModelCallback>& callback) {
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
         return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
     }
 
@@ -613,7 +622,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -626,7 +635,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index dbc800a..92ebb55 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -290,26 +290,28 @@
     // Dummy class -- a prepared model must not be nullptr.
     class PartitioningPreparedModel : public IPreparedModel {
        public:
-        Return<ErrorStatus> execute(const V1_0::Request&,
-                                    const sp<V1_0::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+                                          const sp<V1_0::IExecutionCallback>&) override {
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
         }
-        Return<ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
-                                        const sp<V1_2::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
+                                              const sp<V1_2::IExecutionCallback>&) override {
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
         }
-        Return<ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
-                                        const sp<V1_2::IExecutionCallback>&) override {
-            return ErrorStatus::DEVICE_UNAVAILABLE;
+        Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
+                                              const OptionalTimePoint&,
+                                              const sp<V1_3::IExecutionCallback>&) override {
+            return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
         }
         Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming,
                                           executeSynchronously_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
+            cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
             return Void();
         }
         Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming,
+                                              const OptionalTimePoint&,
                                               executeSynchronously_1_3_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
+            cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
             return Void();
         }
         Return<void> configureExecutionBurst(
@@ -317,7 +319,7 @@
                 const MQDescriptorSync<V1_2::FmqRequestDatum>& /*requestChannel*/,
                 const MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
                 configureExecutionBurst_cb cb) override {
-            cb(ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
+            cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
             return Void();
         }
     };
@@ -339,19 +341,19 @@
     ~PartitioningDriver() override {}
 
     Return<void> getVersionString(getVersionString_cb cb) override {
-        cb(ErrorStatus::NONE, mVersionString);
+        cb(V1_0::ErrorStatus::NONE, mVersionString);
         return Void();
     }
 
-    Return<ErrorStatus> prepareModel_1_3(const Model& model, ExecutionPreference,
-                                         const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
-                                         const CacheToken&,
-                                         const sp<IPreparedModelCallback>& cb) override {
-        ErrorStatus status = ErrorStatus::NONE;
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const Model& model, ExecutionPreference, Priority, const OptionalTimePoint&,
+            const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+            const sp<V1_3::IPreparedModelCallback>& cb) override {
+        V1_3::ErrorStatus status = V1_3::ErrorStatus::NONE;
         if (mOEM != OEMYes) {
             for (const auto& operation : model.main.operations) {
                 if (operation.type == OperationType::OEM_OPERATION) {
-                    status = ErrorStatus::INVALID_ARGUMENT;
+                    status = V1_3::ErrorStatus::INVALID_ARGUMENT;
                     break;
                 }
             }
@@ -363,14 +365,14 @@
     Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
 
     Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
-        cb(ErrorStatus::NONE, mCapabilities);
+        cb(V1_3::ErrorStatus::NONE, mCapabilities);
         return Void();
     }
 
     Return<void> getSupportedOperations_1_3(const Model& model,
-                                            getSupportedOperations_cb cb) override {
+                                            getSupportedOperations_1_3_cb cb) override {
         if (!android::nn::validateModel(model)) {
-            cb(ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
+            cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
             return Void();
         }
 
@@ -387,20 +389,20 @@
                 supported[i] = true;
             }
         }
-        cb(ErrorStatus::NONE, supported);
+        cb(V1_3::ErrorStatus::NONE, supported);
         return Void();
     }
 
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
-        cb(ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
+        cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
         return Void();
     }
 
-    Return<ErrorStatus> prepareModelFromCache(
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
             const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
             const sp<V1_2::IPreparedModelCallback>& callback) override {
-        callback->notify_1_2(ErrorStatus::NONE, new PartitioningPreparedModel);
-        return ErrorStatus::NONE;
+        callback->notify_1_2(V1_0::ErrorStatus::NONE, new PartitioningPreparedModel);
+        return V1_0::ErrorStatus::NONE;
     }
 
    private:
@@ -424,7 +426,7 @@
                                             getSupportedOperations_1_2_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference preference,
             const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
             const CacheToken& token,
@@ -442,10 +444,9 @@
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
         return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
     }
-    Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
-                                              const hidl_vec<hidl_handle>& dataCache,
-                                              const CacheToken& token,
-                                              const sp<V1_2::IPreparedModelCallback>& callback) {
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
         return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
     }
     Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
@@ -455,7 +456,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -468,7 +469,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -492,7 +493,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -505,7 +506,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -529,7 +530,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
diff --git a/nn/runtime/test/TestPartitioningRandom.cpp b/nn/runtime/test/TestPartitioningRandom.cpp
index 094fe01..0d92764 100644
--- a/nn/runtime/test/TestPartitioningRandom.cpp
+++ b/nn/runtime/test/TestPartitioningRandom.cpp
@@ -531,12 +531,12 @@
                 .relaxedFloat32toFloat16PerformanceScalar = kPerf,
                 .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                 .operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
-        _hidl_cb(ErrorStatus::NONE, capabilities);
+        _hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
         return Void();
     }
 
     Return<void> getSupportedOperations_1_3(const HidlModel& model,
-                                            getSupportedOperations_cb cb) override {
+                                            getSupportedOperations_1_3_cb cb) override {
         if (nn::validateModel(model)) {
             const size_t count = model.main.operations.size();
             std::vector<bool> supported(count);
@@ -544,35 +544,36 @@
                 supported[i] = (mSignatures.count(RandomPartitioningTest::getSignature(
                                         model, model.main.operations[i])) != 0);
             }
-            cb(ErrorStatus::NONE, supported);
+            cb(V1_3::ErrorStatus::NONE, supported);
         } else {
-            cb(ErrorStatus::INVALID_ARGUMENT, {});
+            cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
         }
         return Void();
     }
 
-    Return<ErrorStatus> prepareModel_1_3(
-            const HidlModel& model, ExecutionPreference preference,
-            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
-            const CacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback) override {
+    Return<V1_3::ErrorStatus> prepareModel_1_3(
+            const HidlModel& model, ExecutionPreference preference, Priority priority,
+            const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+            const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+            const sp<V1_3::IPreparedModelCallback>& callback) override {
         // NOTE: We verify that all operations in the model are supported.
-        ErrorStatus outStatus = ErrorStatus::INVALID_ARGUMENT;
+        V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
         auto ret = getSupportedOperations_1_3(
-                model,
-                [&outStatus](ErrorStatus inStatus, const hidl_vec<bool>& supportedOperations) {
-                    if (inStatus == ErrorStatus::NONE) {
+                model, [&outStatus](V1_3::ErrorStatus inStatus,
+                                    const hidl_vec<bool>& supportedOperations) {
+                    if (inStatus == V1_3::ErrorStatus::NONE) {
                         if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
                                         [](bool v) { return v; })) {
-                            outStatus = ErrorStatus::NONE;
+                            outStatus = V1_3::ErrorStatus::NONE;
                         }
                     }
                 });
-        if (ret.isOk() && (outStatus == ErrorStatus::NONE)) {
-            return SampleDriver::prepareModel_1_3(model, preference, modelCache, dataCache, token,
-                                                  callback);
+        if (ret.isOk() && (outStatus == V1_3::ErrorStatus::NONE)) {
+            return SampleDriver::prepareModel_1_3(model, preference, priority, deadline, modelCache,
+                                                  dataCache, token, callback);
         } else {
-            callback->notify_1_3(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_3::ErrorStatus::INVALID_ARGUMENT;
         }
     }
 
@@ -592,7 +593,7 @@
                                             getSupportedOperations_1_2_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_2(
+    Return<V1_0::ErrorStatus> prepareModel_1_2(
             const V1_2::Model& model, ExecutionPreference preference,
             const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
             const CacheToken& token,
@@ -610,10 +611,9 @@
     Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
         return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
     }
-    Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
-                                              const hidl_vec<hidl_handle>& dataCache,
-                                              const CacheToken& token,
-                                              const sp<V1_2::IPreparedModelCallback>& callback) {
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+            const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+            const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
         return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
     }
     Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
@@ -623,7 +623,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -636,7 +636,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -658,7 +658,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
@@ -671,7 +671,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
@@ -693,7 +693,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mLatestDriver->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mLatestDriver->prepareModel(model, actualCallback);
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
index 82b6150..8b44cc8 100644
--- a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -150,7 +150,7 @@
 }
 
 Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
-    cb(ErrorStatus::NONE,
+    cb(V1_0::ErrorStatus::NONE,
        {
                {
                        .name = EXAMPLE_FIBONACCI_EXTENSION_NAME,
@@ -180,7 +180,7 @@
             .relaxedFloat32toFloat16PerformanceScalar = kPerf,
             .relaxedFloat32toFloat16PerformanceTensor = kPerf,
             .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
-    cb(ErrorStatus::NONE, capabilities);
+    cb(V1_3::ErrorStatus::NONE, capabilities);
     return Void();
 }
 
@@ -188,7 +188,7 @@
                                                          getSupportedOperations_1_3_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (!validateModel(model)) {
-        cb(ErrorStatus::INVALID_ARGUMENT, {});
+        cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
     const size_t count = model.main.operations.size();
@@ -197,13 +197,13 @@
         const Operation& operation = model.main.operations[i];
         if (fibonacci_op::isFibonacciOperation(operation, model)) {
             if (!fibonacci_op::validate(operation, model)) {
-                cb(ErrorStatus::INVALID_ARGUMENT, {});
+                cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
                 return Void();
             }
             supported[i] = true;
         }
     }
-    cb(ErrorStatus::NONE, supported);
+    cb(V1_3::ErrorStatus::NONE, supported);
     return Void();
 }
 
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 5d85d11..ee944dd 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -66,7 +66,7 @@
                                             getSupportedOperations_1_1_cb _hidl_cb) override {
         return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel_1_1(
+    Return<V1_0::ErrorStatus> prepareModel_1_1(
             const V1_1::Model& model, ExecutionPreference preference,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
@@ -79,7 +79,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mDriverV1_2->prepareModel(model, actualCallback);
@@ -102,7 +102,7 @@
                                         getSupportedOperations_cb _hidl_cb) override {
         return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
     }
-    Return<ErrorStatus> prepareModel(
+    Return<V1_0::ErrorStatus> prepareModel(
             const V1_0::Model& model,
             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
         return mDriverV1_2->prepareModel(model, actualCallback);