android-nn-driver: specify ns to avoid collisions
Explicitly qualify namespaces to avoid collisions between the NN framework
types and the HIDL-generated (V1_0/V1_1/V1_2/V1_3) types.
Test: build
Change-Id: I26ebdda0cd17dd7ec4ec5ffeb40030fc6e4dd7bd
diff --git a/1.3/ArmnnDriver.hpp b/1.3/ArmnnDriver.hpp
index b6b55fa..8292d69 100644
--- a/1.3/ArmnnDriver.hpp
+++ b/1.3/ArmnnDriver.hpp
@@ -95,9 +95,9 @@
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_1()");
- if (!(preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED))
+ if (!(preference == V1_1::ExecutionPreference::LOW_POWER ||
+ preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == V1_1::ExecutionPreference::SUSTAINED_SPEED))
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
@@ -138,9 +138,9 @@
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_2()");
- if (!(preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED))
+ if (!(preference == V1_1::ExecutionPreference::LOW_POWER ||
+ preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == V1_1::ExecutionPreference::SUSTAINED_SPEED))
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_2: Invalid execution preference");
cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
@@ -185,9 +185,9 @@
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_3()");
- if (!(preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED))
+ if (!(preference == V1_1::ExecutionPreference::LOW_POWER ||
+ preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == V1_1::ExecutionPreference::SUSTAINED_SPEED))
{
ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_3: Invalid execution preference");
cb->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
@@ -226,7 +226,7 @@
return Void();
}
- Return<DeviceStatus> getStatus() override
+ Return<V1_0::DeviceStatus> getStatus() override
{
ALOGV("hal_1_3::ArmnnDriver::getStatus()");
@@ -260,7 +260,7 @@
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
- Return<ErrorStatus> prepareModelFromCache_1_3(
+ Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
const V1_3::OptionalTimePoint&,
const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
@@ -268,17 +268,17 @@
const sp<V1_3::IPreparedModelCallback>& callback)
{
ALOGV("hal_1_3::ArmnnDriver::prepareModelFromCache()");
- callback->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr);
- return ErrorStatus::GENERAL_FAILURE;
+ callback->notify_1_3(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
Return<void> allocate(const V1_3::BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
- const hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ const android::hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const android::hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const android::hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
allocate_cb cb) {
ALOGV("hal_1_3::ArmnnDriver::allocate()");
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
return Void();
}
diff --git a/1.3/ArmnnDriverImpl.hpp b/1.3/ArmnnDriverImpl.hpp
index 2b39d4e..af4e6c4 100644
--- a/1.3/ArmnnDriverImpl.hpp
+++ b/1.3/ArmnnDriverImpl.hpp
@@ -25,7 +25,7 @@
class ArmnnDriverImpl
{
public:
- static Return<V1_3::ErrorStatus> prepareArmnnModel_1_3(const armnn::IRuntimePtr& runtime,
+ static android::hardware::Return<V1_3::ErrorStatus> prepareArmnnModel_1_3(const armnn::IRuntimePtr& runtime,
const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
const DriverOptions& options,
const V1_3::Model& model,
@@ -33,7 +33,7 @@
bool float32ToFloat16 = false,
V1_3::Priority priority = V1_3::Priority::MEDIUM);
- static Return<void> getCapabilities_1_3(const armnn::IRuntimePtr& runtime,
+ static android::hardware::Return<void> getCapabilities_1_3(const armnn::IRuntimePtr& runtime,
V1_3::IDevice::getCapabilities_1_3_cb cb);
};
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index b37ac24..b3ba627 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -38,8 +38,8 @@
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
V1_0::ErrorStatus errorStatus,
- std::vector<OutputShape>,
- const Timing,
+ std::vector<V1_2::OutputShape>,
+ const V1_2::Timing,
std::string callingFunction)
{
Return<void> returned = callback->notify(errorStatus);
@@ -53,8 +53,8 @@
void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
V1_0::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing timing,
std::string callingFunction)
{
Return<void> returned = callback->notify_1_2(errorStatus, outputShapes, timing);
@@ -178,8 +178,8 @@
}
auto cb = [callback](V1_0::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string callingFunction)
{
NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
@@ -191,7 +191,7 @@
template<typename HalVersion>
Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(
const V1_0::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
const sp<V1_2::IExecutionCallback>& callback)
{
if (callback.get() == nullptr)
@@ -201,8 +201,8 @@
}
auto cb = [callback](V1_0::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string callingFunction)
{
NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
@@ -240,7 +240,7 @@
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOutputs(
armnn::OutputTensors& outputs,
- std::vector<OutputShape> &outputShapes,
+ std::vector<V1_2::OutputShape> &outputShapes,
const V1_0::Request& request,
const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
@@ -302,7 +302,7 @@
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
- std::vector<OutputShape> outputShapes(request.outputs.size());
+ std::vector<V1_2::OutputShape> outputShapes(request.outputs.size());
auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
if (errorStatus != V1_0::ErrorStatus::NONE)
@@ -332,8 +332,8 @@
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const V1_0::Request& request,
- MeasureTiming measureTiming,
- executeSynchronously_cb cb)
+ V1_2::MeasureTiming measureTiming,
+ V1_2::IPreparedModel::executeSynchronously_cb cb)
{
ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
m_RequestCount++;
@@ -346,7 +346,7 @@
TimePoint driverStart;
- if (measureTiming == MeasureTiming::YES)
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
driverStart = Now();
}
@@ -359,8 +359,8 @@
}
auto cbWrapper = [cb](V1_0::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string)
{
cb(errorStatus, outputShapes, timing);
@@ -405,7 +405,7 @@
DumpTensorsIfRequired("Input", inputTensors);
- std::vector<OutputShape> outputShapes(outputTensors.size());
+ std::vector<V1_2::OutputShape> outputShapes(outputTensors.size());
for (unsigned int i = 0; i < outputTensors.size(); i++)
{
std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
@@ -418,14 +418,14 @@
// run it
try
{
- if (cb.ctx.measureTimings == MeasureTiming::YES)
+ if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
deviceStart = Now();
}
armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
- if (cb.ctx.measureTimings == MeasureTiming::YES)
+ if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
deviceEnd = Now();
}
@@ -494,10 +494,10 @@
outputTensors.emplace_back(i, outputTensor);
}
- auto nullCallback = [](V1_0::ErrorStatus, std::vector<OutputShape>, const Timing&, std::string) {};
+ auto nullCallback = [](V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, const V1_2::Timing&, std::string) {};
CallbackContext_1_2 callbackContext;
callbackContext.callback = nullCallback;
- callbackContext.ctx.measureTimings = MeasureTiming::NO;
+ callbackContext.ctx.measureTimings = V1_2::MeasureTiming::NO;
auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
return ExecuteGraph(memPools,
inputTensors,
@@ -507,11 +507,11 @@
template<typename HalVersion>
Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const V1_0::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
CallbackAsync_1_2 callback)
{
ExecutionContext_1_2 ctx;
- if (measureTiming == MeasureTiming::YES)
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
ctx.measureTimings = measureTiming;
ctx.driverStart = Now();
diff --git a/ArmnnPreparedModel_1_2.hpp b/ArmnnPreparedModel_1_2.hpp
index e68614a..049d347 100644
--- a/ArmnnPreparedModel_1_2.hpp
+++ b/ArmnnPreparedModel_1_2.hpp
@@ -51,11 +51,11 @@
virtual Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
const sp<V1_0::IExecutionCallback>& callback) override;
- virtual Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+ virtual Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback) override;
virtual Return<void> executeSynchronously(const V1_0::Request &request,
- MeasureTiming measure,
+ V1_2::MeasureTiming measure,
V1_2::IPreparedModel::executeSynchronously_cb cb) override;
virtual Return<void> configureExecutionBurst(
@@ -77,7 +77,7 @@
private:
Return<V1_0::ErrorStatus> Execute(const V1_0::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
CallbackAsync_1_2 callback);
Return<V1_0::ErrorStatus> PrepareMemoryForInputs(
@@ -87,7 +87,7 @@
Return<V1_0::ErrorStatus> PrepareMemoryForOutputs(
armnn::OutputTensors& outputs,
- std::vector<OutputShape> &outputShapes,
+ std::vector<V1_2::OutputShape> &outputShapes,
const V1_0::Request& request,
const std::vector<android::nn::RunTimePoolInfo>& memPools);
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index d33873f..48ae87d 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -27,7 +27,7 @@
namespace {
-static const Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+static const V1_2::Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
using TimePoint = std::chrono::steady_clock::time_point;
@@ -44,8 +44,8 @@
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape>,
- const Timing,
+ std::vector<V1_2::OutputShape>,
+ const V1_2::Timing,
std::string callingFunction)
{
Return<void> returned = callback->notify(convertToV1_0(errorStatus));
@@ -59,8 +59,8 @@
void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing timing,
std::string callingFunction)
{
Return<void> returned = callback->notify_1_2(convertToV1_0(errorStatus), outputShapes, timing);
@@ -74,8 +74,8 @@
void NotifyCallbackAndCheck(const ::android::sp<V1_3::IExecutionCallback>& callback,
V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing timing,
std::string callingFunction)
{
Return<void> returned = callback->notify_1_3(errorStatus, outputShapes, timing);
@@ -87,7 +87,7 @@
}
}
-bool ValidateRequestArgument(const RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
+bool ValidateRequestArgument(const V1_0::RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
{
if (requestArg.dimensions.size() != 0)
{
@@ -112,7 +112,7 @@
return true;
}
-armnn::Tensor GetTensorForRequestArgument(const RequestArgument& requestArg,
+armnn::Tensor GetTensorForRequestArgument(const V1_0::RequestArgument& requestArg,
const armnn::TensorInfo& tensorInfo,
const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
{
@@ -201,21 +201,21 @@
}
auto cb = [callback](V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string callingFunction)
{
NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
};
- return convertToV1_0(Execute(convertToV1_3(request), MeasureTiming::NO, cb));
+ return convertToV1_0(Execute(convertToV1_3(request), V1_2::MeasureTiming::NO, cb));
}
template<typename HalVersion>
Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::execute_1_2(
const V1_0::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
const sp<V1_2::IExecutionCallback>& callback)
{
if (callback.get() == nullptr)
@@ -225,8 +225,8 @@
}
auto cb = [callback](V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string callingFunction)
{
NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
@@ -238,7 +238,7 @@
template<typename HalVersion>
Return <V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::execute_1_3(
const V1_3::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
const V1_3::OptionalTimePoint&,
const V1_3::OptionalTimeoutDuration&,
const sp<V1_3::IExecutionCallback>& callback)
@@ -250,8 +250,8 @@
}
auto cb = [callback](V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string callingFunction)
{
NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
@@ -266,7 +266,7 @@
class ArmnnFencedExecutionCallback : public V1_3::IFencedExecutionCallback
{
public:
- ArmnnFencedExecutionCallback(V1_3::ErrorStatus errorStatus, Timing timing, Timing fenceTiming)
+ ArmnnFencedExecutionCallback(V1_3::ErrorStatus errorStatus, V1_2::Timing timing, V1_2::Timing fenceTiming)
: m_ErrorStatus(errorStatus), m_Timing(timing), m_FenceTiming(fenceTiming) {}
~ArmnnFencedExecutionCallback() {}
@@ -277,33 +277,33 @@
}
private:
V1_3::ErrorStatus m_ErrorStatus;
- Timing m_Timing;
- Timing m_FenceTiming;
+ V1_2::Timing m_Timing;
+ V1_2::Timing m_FenceTiming;
};
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_3<HalVersion>::executeFenced(const V1_3::Request& request,
const hidl_vec<hidl_handle>& fenceWaitFor,
- MeasureTiming measureTiming,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration&,
+ V1_2::MeasureTiming measureTiming,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration&,
executeFenced_cb cb)
{
ALOGV("ArmnnPreparedModel_1_3::executeFenced(...)");
if (cb == nullptr)
{
ALOGE("ArmnnPreparedModel_1_3::executeFenced invalid callback passed");
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
return Void();
}
- if (deadline.getDiscriminator() != OptionalTimePoint::hidl_discriminator::none)
+ if (deadline.getDiscriminator() != V1_3::OptionalTimePoint::hidl_discriminator::none)
{
ALOGW("ArmnnPreparedModel_1_3::executeFenced parameter deadline is set but not supported.");
}
- if (loopTimeoutDuration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none)
+ if (loopTimeoutDuration.getDiscriminator() != V1_3::OptionalTimeoutDuration::hidl_discriminator::none)
{
ALOGW("ArmnnPreparedModel_1_3::executeFenced parameter loopTimeoutDuration is set but not supported.");
}
@@ -311,12 +311,12 @@
if (!android::nn::validateRequest(request, m_Model, /*allowUnspecifiedOutput=*/false))
{
ALOGV("ArmnnPreparedModel_1_3::executeFenced outputs must be specified for fenced execution ");
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
return Void();
}
ExecutionContext_1_3 ctx;
- if (measureTiming == MeasureTiming::YES)
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
ctx.measureTimings = measureTiming;
ctx.driverStart = Now();
@@ -339,20 +339,20 @@
auto fenceNativeHandle = fenceWaitFor[index].getNativeHandle();
if (!fenceNativeHandle)
{
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
return Void();
}
if (sync_wait(fenceNativeHandle->data[0], -1) < 0)
{
ALOGE("ArmnnPreparedModel_1_3::executeFenced sync fence failed.");
- cb(ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
return Void();
}
}
TimePoint fenceExecutionStart;
- if (measureTiming == MeasureTiming::YES)
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
fenceExecutionStart = Now();
}
@@ -368,14 +368,14 @@
auto [status, outShapes, timings, message] = PrepareMemoryForIO(*inputs, *outputs, *memPools, request);
if (status != V1_3::ErrorStatus::NONE)
{
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
return Void();
}
ALOGV("ArmnnPreparedModel_1_3::executeFenced(...) before ExecuteGraph");
// call it with nullCallback for now as we will report the error status from here..
- auto nullCallback = [](V1_3::ErrorStatus, std::vector<OutputShape>, const Timing&, std::string) {};
+ auto nullCallback = [](V1_3::ErrorStatus, std::vector<V1_2::OutputShape>, const V1_2::Timing&, std::string) {};
CallbackContext_1_3 cbCtx;
cbCtx.callback = nullCallback;
cbCtx.ctx = ctx;
@@ -388,9 +388,9 @@
}
ALOGV("ArmnnPreparedModel_1_3::executeFenced(...) after ExecuteGraph");
- Timing timing = g_NoTiming;
- Timing fenceTiming = g_NoTiming;
- if (measureTiming == MeasureTiming::YES)
+ V1_2::Timing timing = g_NoTiming;
+ V1_2::Timing fenceTiming = g_NoTiming;
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
fenceTiming.timeOnDevice = MicrosecondsDuration(ctx.deviceEnd, ctx.deviceStart);
fenceTiming.timeInDriver = MicrosecondsDuration(ctx.driverEnd, fenceExecutionStart);
@@ -399,8 +399,8 @@
}
sp<ArmnnFencedExecutionCallback> armnnFencedExecutionCallback =
- new ArmnnFencedExecutionCallback(ErrorStatus::NONE, timing, fenceTiming);
- cb(ErrorStatus::NONE, hidl_handle(nullptr), armnnFencedExecutionCallback);
+ new ArmnnFencedExecutionCallback(V1_3::ErrorStatus::NONE, timing, fenceTiming);
+ cb(V1_3::ErrorStatus::NONE, hidl_handle(nullptr), armnnFencedExecutionCallback);
return Void();
}
@@ -433,7 +433,7 @@
template<typename HalVersion>
Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOutputs(
armnn::OutputTensors& outputs,
- std::vector<OutputShape> &outputShapes,
+ std::vector<V1_2::OutputShape> &outputShapes,
const V1_3::Request& request,
const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
@@ -492,7 +492,7 @@
}
template<typename HalVersion>
-std::tuple<V1_3::ErrorStatus, hidl_vec<OutputShape>, Timing, std::string>
+std::tuple<V1_3::ErrorStatus, hidl_vec<V1_2::OutputShape>, V1_2::Timing, std::string>
ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForIO(armnn::InputTensors& inputs,
armnn::OutputTensors& outputs,
std::vector<android::nn::RunTimePoolInfo>& memPools,
@@ -500,7 +500,7 @@
{
if (!setRunTimePoolInfosFromMemoryPools(&memPools, request.pools))
{
- return {ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
+ return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
}
// add the inputs and outputs with their data
@@ -508,10 +508,10 @@
{
if (PrepareMemoryForInputs(inputs, request, memPools) != V1_3::ErrorStatus::NONE)
{
- return {ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
+ return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
}
- std::vector<OutputShape> outputShapes(request.outputs.size());
+ std::vector<V1_2::OutputShape> outputShapes(request.outputs.size());
auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
if (errorStatus != V1_3::ErrorStatus::NONE)
@@ -522,12 +522,12 @@
catch (armnn::Exception& e)
{
ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
- return {ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
+ return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
}
catch (std::exception& e)
{
ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
- return {ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
+ return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
}
return {V1_3::ErrorStatus::NONE, {}, g_NoTiming, "ArmnnPreparedModel_1_3::execute"};
@@ -538,7 +538,7 @@
Return<void> ArmnnPreparedModel_1_3<HalVersion>::ExecuteSynchronously(const V1_3::Request& request,
CallbackContext cbCtx)
{
- if (cbCtx.ctx.measureTimings == MeasureTiming::YES)
+ if (cbCtx.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
cbCtx.ctx.driverStart = Now();
}
@@ -587,7 +587,7 @@
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_3<HalVersion>::executeSynchronously(const V1_0::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
executeSynchronously_cb cb)
{
ALOGV("ArmnnPreparedModel_1_3::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
@@ -600,8 +600,8 @@
}
auto cbWrapper = [cb](V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string)
{
cb(convertToV1_0(errorStatus), outputShapes, timing);
@@ -618,7 +618,7 @@
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_3<HalVersion>::executeSynchronously_1_3(
const V1_3::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb)
@@ -632,20 +632,20 @@
return Void();
}
- if (deadline.getDiscriminator() != OptionalTimePoint::hidl_discriminator::none)
+ if (deadline.getDiscriminator() != V1_3::OptionalTimePoint::hidl_discriminator::none)
{
ALOGW("ArmnnPreparedModel_1_3::executeSynchronously_1_3 parameter deadline is set but not supported.");
}
- if (loopTimeoutDuration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none)
+ if (loopTimeoutDuration.getDiscriminator() != V1_3::OptionalTimeoutDuration::hidl_discriminator::none)
{
ALOGW(
"ArmnnPreparedModel_1_3::executeSynchronously_1_3 parameter loopTimeoutDuration is set but not supported.");
}
auto cbWrapper = [cb](V1_3::ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- const Timing& timing,
+ std::vector<V1_2::OutputShape> outputShapes,
+ const V1_2::Timing& timing,
std::string)
{
cb(errorStatus, outputShapes, timing);
@@ -695,7 +695,7 @@
DumpTensorsIfRequired("Input", inputTensors);
- std::vector<OutputShape> outputShapes(outputTensors.size());
+ std::vector<V1_2::OutputShape> outputShapes(outputTensors.size());
for (unsigned int i = 0; i < outputTensors.size(); i++)
{
std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
@@ -708,14 +708,14 @@
// run it
try
{
- if (cb.ctx.measureTimings == MeasureTiming::YES)
+ if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
cb.ctx.deviceStart = Now();
}
armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
- if (cb.ctx.measureTimings == MeasureTiming::YES)
+ if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
cb.ctx.deviceEnd = Now();
}
@@ -743,10 +743,10 @@
DumpTensorsIfRequired("Output", outputTensors);
- if (cb.ctx.measureTimings == MeasureTiming::YES)
+ if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
{
cb.ctx.driverEnd = Now();
- Timing timing;
+ V1_2::Timing timing;
timing.timeOnDevice = MicrosecondsDuration(cb.ctx.deviceEnd, cb.ctx.deviceStart);
timing.timeInDriver = MicrosecondsDuration(cb.ctx.driverEnd, cb.ctx.driverStart);
ALOGV("ArmnnPreparedModel_1_3::execute timing - Device = %" PRIu64 " Driver = %" PRIu64, timing.timeOnDevice,
@@ -783,10 +783,10 @@
outputTensors.emplace_back(i, outputTensor);
}
- auto nullCallback = [](V1_3::ErrorStatus, std::vector<OutputShape>, const Timing&, std::string) {};
+ auto nullCallback = [](V1_3::ErrorStatus, std::vector<V1_2::OutputShape>, const V1_2::Timing&, std::string) {};
CallbackContext_1_3 callbackContext;
callbackContext.callback = nullCallback;
- callbackContext.ctx.measureTimings = MeasureTiming::NO;
+ callbackContext.ctx.measureTimings = V1_2::MeasureTiming::NO;
auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
auto errorStatus = ExecuteGraph(memPools,
@@ -798,11 +798,11 @@
template<typename HalVersion>
Return <V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::Execute(const V1_3::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
CallbackAsync_1_3 callback)
{
ExecutionContext_1_3 ctx;
- if (measureTiming == MeasureTiming::YES)
+ if (measureTiming == V1_2::MeasureTiming::YES)
{
ctx.measureTimings = measureTiming;
ctx.driverStart = Now();
diff --git a/ArmnnPreparedModel_1_3.hpp b/ArmnnPreparedModel_1_3.hpp
index 5010bbd..5dcc202 100644
--- a/ArmnnPreparedModel_1_3.hpp
+++ b/ArmnnPreparedModel_1_3.hpp
@@ -58,7 +58,7 @@
Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
const sp<V1_0::IExecutionCallback>& callback) override;
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+ Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback) override;
Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request,
@@ -68,18 +68,18 @@
const sp<V1_3::IExecutionCallback>& callback) override;
Return<void> executeSynchronously(const V1_0::Request &request,
- MeasureTiming measure,
+ V1_2::MeasureTiming measure,
V1_3::IPreparedModel::executeSynchronously_cb cb) override;
Return<void> executeSynchronously_1_3(const V1_3::Request &request,
- MeasureTiming measure,
+ V1_2::MeasureTiming measure,
const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
V1_3::IPreparedModel::executeSynchronously_1_3_cb cb) override;
Return<void> executeFenced(const V1_3::Request& request,
const android::hardware::hidl_vec<android::hardware::hidl_handle>& fenceWaitFor,
- MeasureTiming measure,
+ V1_2::MeasureTiming measure,
const V1_3::OptionalTimePoint& deadline,
const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const V1_3::OptionalTimeoutDuration& duration,
@@ -110,7 +110,7 @@
private:
Return <V1_3::ErrorStatus> Execute(const V1_3::Request& request,
- MeasureTiming measureTiming,
+ V1_2::MeasureTiming measureTiming,
CallbackAsync_1_3 callback);
Return<V1_3::ErrorStatus> PrepareMemoryForInputs(
@@ -120,11 +120,11 @@
Return<V1_3::ErrorStatus> PrepareMemoryForOutputs(
armnn::OutputTensors& outputs,
- std::vector<OutputShape> &outputShapes,
+ std::vector<V1_2::OutputShape> &outputShapes,
const V1_3::Request& request,
const std::vector<android::nn::RunTimePoolInfo>& memPools);
- std::tuple<V1_3::ErrorStatus, hidl_vec<OutputShape>, Timing, std::string> PrepareMemoryForIO(
+ std::tuple<V1_3::ErrorStatus, android::hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing, std::string> PrepareMemoryForIO(
armnn::InputTensors& inputs,
armnn::OutputTensors& outputs,
std::vector<android::nn::RunTimePoolInfo>& memPools,