Cleanup HalInterfaces.h
Prior to this CL, HalInterfaces.h polluted the global namespace through
its "using" declarations. This CL creates a new ::android::nn::hal
namespace and places those names there instead.
This CL also moves the Callback objects from
::android::hardware::neuralnetworks::V1_2::implementation to
::android::nn, for consistency with the rest of frameworks/ml/nn.
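In effect, the header now gathers the HAL "using" declarations inside a
dedicated namespace, and each translation unit opts in locally instead of
every includer receiving the names globally. A minimal sketch of the
pattern, with a hypothetical Foo standing in for the real HAL types:

    namespace android::hardware {
    struct Foo {};  // hypothetical; stands in for a HAL type such as hidl_vec
    }

    // HalInterfaces.h: re-export under ::android::nn::hal, not globally.
    namespace android::nn::hal {
    using hardware::Foo;
    }

    // SomeFile.cpp: the HAL names become visible only inside this scope.
    namespace android::nn {
    namespace {
    using namespace hal;
    Foo makeFoo() { return Foo{}; }  // resolves to ::android::hardware::Foo
    }  // namespace
    }  // namespace android::nn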
Fixes: 72880287
Test: mma
Change-Id: I399ad32f5c541a493429c6fa8e192169a903ea9f
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 681060a..bff70a2 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -37,6 +37,8 @@
namespace {
+using namespace hal;
+
class OperationExecutionContext : public IOperationExecutionContext {
DISALLOW_IMPLICIT_CONSTRUCTORS(OperationExecutionContext);
diff --git a/nn/common/ExecutionBurstController.cpp b/nn/common/ExecutionBurstController.cpp
index 55ef9ad..7e9de83 100644
--- a/nn/common/ExecutionBurstController.cpp
+++ b/nn/common/ExecutionBurstController.cpp
@@ -27,14 +27,16 @@
namespace android::nn {
namespace {
-using ::android::hardware::MQDescriptorSync;
+using namespace hal;
+
+using hardware::MQDescriptorSync;
using FmqRequestDescriptor = MQDescriptorSync<FmqRequestDatum>;
using FmqResultDescriptor = MQDescriptorSync<FmqResultDatum>;
constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
std::numeric_limits<uint64_t>::max()};
-class BurstContextDeathHandler : public hardware::hidl_death_recipient {
+class BurstContextDeathHandler : public hidl_death_recipient {
public:
using Callback = std::function<void()>;
@@ -516,7 +518,7 @@
const std::shared_ptr<RequestChannelSender>& requestChannelSender,
const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver,
const sp<IBurstContext>& burstContext, const sp<ExecutionBurstCallback>& callback,
- const sp<hardware::hidl_death_recipient>& deathHandler)
+ const sp<hidl_death_recipient>& deathHandler)
: mRequestChannelSender(requestChannelSender),
mResultChannelReceiver(resultChannelReceiver),
mBurstContext(burstContext),
diff --git a/nn/common/ExecutionBurstServer.cpp b/nn/common/ExecutionBurstServer.cpp
index 28f73e2..74bc340 100644
--- a/nn/common/ExecutionBurstServer.cpp
+++ b/nn/common/ExecutionBurstServer.cpp
@@ -29,6 +29,8 @@
namespace android::nn {
namespace {
+using namespace hal;
+
constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
std::numeric_limits<uint64_t>::max()};
diff --git a/nn/common/GraphDump.cpp b/nn/common/GraphDump.cpp
index c5798b3..9fe6bf3 100644
--- a/nn/common/GraphDump.cpp
+++ b/nn/common/GraphDump.cpp
@@ -28,6 +28,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
// class Dumper is a wrapper around an std::ostream (if instantiated
// with a pointer to a stream) or around LOG(INFO) (otherwise).
//
diff --git a/nn/common/OperationResolver.cpp b/nn/common/OperationResolver.cpp
index b2c03b9..bd1ad03 100644
--- a/nn/common/OperationResolver.cpp
+++ b/nn/common/OperationResolver.cpp
@@ -23,6 +23,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
// TODO(b/119608412): Find a way to not reference every operation here.
const OperationRegistration* register_ABS();
const OperationRegistration* register_ADD();
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index e128afb..9219ddd 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -27,6 +27,8 @@
namespace {
+using namespace hal;
+
bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const char* tag,
uint32_t operandCount,
std::function<OperandType(uint32_t)> getOperandType) {
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 18eedd7..fc54932 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -33,6 +33,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
@@ -1781,7 +1783,7 @@
using OpPerf = Capabilities::OperandPerformance;
// Note: range presents enumerators in declaration order, not in numerical order.
- static constexpr ::android::hardware::hidl_enum_range<OperandType> kOperandTypeRange;
+ static constexpr hidl_enum_range<OperandType> kOperandTypeRange;
hidl_vec<OpPerf> ret(kOperandTypeRange.end() - kOperandTypeRange.begin());
diff --git a/nn/common/ValidateHal.cpp b/nn/common/ValidateHal.cpp
index 421730a..da440d8 100644
--- a/nn/common/ValidateHal.cpp
+++ b/nn/common/ValidateHal.cpp
@@ -27,6 +27,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
template <class T_Model>
struct ModelToHalVersion;
template <>
diff --git a/nn/common/include/CpuExecutor.h b/nn/common/include/CpuExecutor.h
index 987337a..a9b5c19 100644
--- a/nn/common/include/CpuExecutor.h
+++ b/nn/common/include/CpuExecutor.h
@@ -35,7 +35,7 @@
// may change during execution.
struct RunTimeOperandInfo {
// TODO Storing the type here is redundant, as it won't change during execution.
- OperandType type;
+ hal::OperandType type;
// The type and dimensions of the operand. The dimensions can
// change at runtime. We include the type because it's useful
// to pass together with the dimension to the functions implementing
@@ -62,14 +62,14 @@
// The length of the buffer.
uint32_t length;
// Whether this is a temporary variable, a model input, a constant, etc.
- OperandLifeTime lifetime;
+ hal::OperandLifeTime lifetime;
// Keeps track of how many operations have yet to make use
// of this temporary variable. When the count is decremented to 0,
// we free the buffer. For non-temporary variables, this count is
// always 0.
uint32_t numberOfUsesLeft;
- Operand::ExtraParams extraParams;
+ hal::Operand::ExtraParams extraParams;
Shape shape() const {
return {
@@ -103,12 +103,12 @@
// RunTimePoolInfo objects.
class RunTimePoolInfo {
public:
- static std::optional<RunTimePoolInfo> createFromHidlMemory(const hidl_memory& hidlMemory);
+ static std::optional<RunTimePoolInfo> createFromHidlMemory(const hal::hidl_memory& hidlMemory);
static RunTimePoolInfo createFromExistingBuffer(uint8_t* buffer);
uint8_t* getBuffer() const;
bool update() const;
- hidl_memory getHidlMemory() const;
+ hal::hidl_memory getHidlMemory() const;
private:
class RunTimePoolInfoImpl;
@@ -118,7 +118,7 @@
};
bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
- const hidl_vec<hidl_memory>& pools);
+ const hal::hidl_vec<hal::hidl_memory>& pools);
// This class is used to execute a model on the CPU.
class CpuExecutor {
@@ -140,11 +140,11 @@
// specified in the constructor.
// The model must outlive the executor. We prevent it from being modified
// while this is executing.
- int run(const Model& model, const Request& request,
+ int run(const hal::Model& model, const hal::Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
- const std::vector<OutputShape>& getOutputShapes() const {
+ const std::vector<hal::OutputShape>& getOutputShapes() const {
CHECK(mFinished) << "getOutputShapes() called by an unfinished CpuExecutor.";
return mOutputShapes;
}
@@ -153,7 +153,7 @@
bool initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
// Runs one operation of the graph.
- int executeOperation(const Operation& entry);
+ int executeOperation(const hal::Operation& entry);
// Decrement the usage count for the operands listed. Frees the memory
// allocated for any temporary variable with a count of zero.
void freeNoLongerUsedOperands(const std::vector<uint32_t>& inputs);
@@ -164,8 +164,8 @@
// The model and the request that we'll execute. Only valid while run()
// is being executed.
- const Model* mModel = nullptr;
- const Request* mRequest = nullptr;
+ const hal::Model* mModel = nullptr;
+ const hal::Request* mRequest = nullptr;
// We're copying the list of all the dimensions from the model, as
// these may be modified when we run the operations. Since we're
@@ -176,7 +176,7 @@
std::vector<RunTimeOperandInfo> mOperands;
// The output operand shapes returning to the runtime.
- std::vector<OutputShape> mOutputShapes;
+ std::vector<hal::OutputShape> mOutputShapes;
// Whether execution is finished and mOutputShapes is ready
bool mFinished = false;
@@ -234,20 +234,18 @@
}
inline bool IsNullInput(const RunTimeOperandInfo *input) {
- return input->lifetime == OperandLifeTime::NO_VALUE;
+ return input->lifetime == hal::OperandLifeTime::NO_VALUE;
}
-inline int NumInputsWithValues(const Operation &operation,
- std::vector<RunTimeOperandInfo> &operands) {
- const std::vector<uint32_t> &inputs = operation.inputs;
- return std::count_if(inputs.begin(), inputs.end(),
- [&operands](uint32_t i) {
- return !IsNullInput(&operands[i]);
- });
+inline int NumInputsWithValues(const hal::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands) {
+ const std::vector<uint32_t>& inputs = operation.inputs;
+ return std::count_if(inputs.begin(), inputs.end(),
+ [&operands](uint32_t i) { return !IsNullInput(&operands[i]); });
}
-inline int NumOutputs(const Operation &operation) {
- return operation.outputs.size();
+inline int NumOutputs(const hal::Operation& operation) {
+ return operation.outputs.size();
}
inline size_t NumDimensions(const RunTimeOperandInfo *operand) {
@@ -258,16 +256,14 @@
return operand->shape().dimensions[i];
}
-inline RunTimeOperandInfo *GetInput(const Operation &operation,
- std::vector<RunTimeOperandInfo> &operands,
- int index) {
- return &operands[operation.inputs[index]];
+inline RunTimeOperandInfo* GetInput(const hal::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, int index) {
+ return &operands[operation.inputs[index]];
}
-inline RunTimeOperandInfo *GetOutput(const Operation &operation,
- std::vector<RunTimeOperandInfo> &operands,
- int index) {
- return &operands[operation.outputs[index]];
+inline RunTimeOperandInfo* GetOutput(const hal::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, int index) {
+ return &operands[operation.outputs[index]];
}
} // anonymous namespace
diff --git a/nn/common/include/ExecutionBurstController.h b/nn/common/include/ExecutionBurstController.h
index 5e32726..8330aa2 100644
--- a/nn/common/include/ExecutionBurstController.h
+++ b/nn/common/include/ExecutionBurstController.h
@@ -48,8 +48,8 @@
* request.
* @return Serialized FMQ request data.
*/
-std::vector<FmqRequestDatum> serialize(const Request& request, MeasureTiming measure,
- const std::vector<int32_t>& slots);
+std::vector<hal::FmqRequestDatum> serialize(const hal::Request& request, hal::MeasureTiming measure,
+ const std::vector<int32_t>& slots);
/**
* Deserialize the FMQ result data.
@@ -60,8 +60,8 @@
* @param data Serialized FMQ result data.
* @return Result object if successfully deserialized, std::nullopt otherwise.
*/
-std::optional<std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>> deserialize(
- const std::vector<FmqResultDatum>& data);
+std::optional<std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>> deserialize(
+ const std::vector<hal::FmqResultDatum>& data);
/**
* ResultChannelReceiver is responsible for waiting on the channel until the
@@ -73,9 +73,9 @@
* invalidating, unblocking the receiver.
*/
class ResultChannelReceiver {
- using FmqResultDescriptor = ::android::hardware::MQDescriptorSync<FmqResultDatum>;
+ using FmqResultDescriptor = ::android::hardware::MQDescriptorSync<hal::FmqResultDatum>;
using FmqResultChannel =
- hardware::MessageQueue<FmqResultDatum, hardware::kSynchronizedReadWrite>;
+ hardware::MessageQueue<hal::FmqResultDatum, hardware::kSynchronizedReadWrite>;
public:
/**
@@ -102,7 +102,8 @@
* @return Result object if successfully received, std::nullopt if error or
* if the receiver object was invalidated.
*/
- std::optional<std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>> getBlocking();
+ std::optional<std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing>>
+ getBlocking();
/**
* Method to mark the channel as invalid, unblocking any current or future
@@ -111,7 +112,7 @@
void invalidate();
// prefer calling ResultChannelReceiver::getBlocking
- std::optional<std::vector<FmqResultDatum>> getPacketBlocking();
+ std::optional<std::vector<hal::FmqResultDatum>> getPacketBlocking();
ResultChannelReceiver(std::unique_ptr<FmqResultChannel> fmqResultChannel, bool blocking);
@@ -127,9 +128,9 @@
* available.
*/
class RequestChannelSender {
- using FmqRequestDescriptor = ::android::hardware::MQDescriptorSync<FmqRequestDatum>;
+ using FmqRequestDescriptor = ::android::hardware::MQDescriptorSync<hal::FmqRequestDatum>;
using FmqRequestChannel =
- hardware::MessageQueue<FmqRequestDatum, hardware::kSynchronizedReadWrite>;
+ hardware::MessageQueue<hal::FmqRequestDatum, hardware::kSynchronizedReadWrite>;
public:
/**
@@ -155,7 +156,8 @@
* the request.
* @return 'true' on successful send, 'false' otherwise.
*/
- bool send(const Request& request, MeasureTiming measure, const std::vector<int32_t>& slots);
+ bool send(const hal::Request& request, hal::MeasureTiming measure,
+ const std::vector<int32_t>& slots);
/**
* Method to mark the channel as invalid, causing all future calls to
@@ -165,7 +167,7 @@
void invalidate();
// prefer calling RequestChannelSender::send
- bool sendPacket(const std::vector<FmqRequestDatum>& packet);
+ bool sendPacket(const std::vector<hal::FmqRequestDatum>& packet);
RequestChannelSender(std::unique_ptr<FmqRequestChannel> fmqRequestChannel, bool blocking);
@@ -200,13 +202,14 @@
* efficiency, if two hidl_memory objects represent the same underlying
* buffer, they must use the same key.
*/
- class ExecutionBurstCallback : public IBurstCallback {
+ class ExecutionBurstCallback : public hal::IBurstCallback {
DISALLOW_COPY_AND_ASSIGN(ExecutionBurstCallback);
public:
ExecutionBurstCallback() = default;
- Return<void> getMemories(const hidl_vec<int32_t>& slots, getMemories_cb cb) override;
+ hal::Return<void> getMemories(const hal::hidl_vec<int32_t>& slots,
+ getMemories_cb cb) override;
/**
* This function performs one of two different actions:
@@ -224,7 +227,7 @@
* @return Unique slot identifiers where each returned slot element
* corresponds to a memory resource element in "memories".
*/
- std::vector<int32_t> getSlots(const hidl_vec<hidl_memory>& memories,
+ std::vector<int32_t> getSlots(const hal::hidl_vec<hal::hidl_memory>& memories,
const std::vector<intptr_t>& keys);
/*
@@ -242,13 +245,13 @@
std::pair<bool, int32_t> freeMemory(intptr_t key);
private:
- int32_t getSlotLocked(const hidl_memory& memory, intptr_t key);
+ int32_t getSlotLocked(const hal::hidl_memory& memory, intptr_t key);
int32_t allocateSlotLocked();
std::mutex mMutex;
std::stack<int32_t, std::vector<int32_t>> mFreeSlots;
std::map<intptr_t, int32_t> mMemoryIdToSlot;
- std::vector<hidl_memory> mMemoryCache;
+ std::vector<hal::hidl_memory> mMemoryCache;
};
/**
@@ -264,15 +267,15 @@
* efficient manner.
* @return ExecutionBurstController Execution burst controller object.
*/
- static std::unique_ptr<ExecutionBurstController> create(const sp<IPreparedModel>& preparedModel,
- bool blocking);
+ static std::unique_ptr<ExecutionBurstController> create(
+ const sp<hal::IPreparedModel>& preparedModel, bool blocking);
// prefer calling ExecutionBurstController::create
ExecutionBurstController(const std::shared_ptr<RequestChannelSender>& requestChannelSender,
const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver,
- const sp<IBurstContext>& burstContext,
+ const sp<hal::IBurstContext>& burstContext,
const sp<ExecutionBurstCallback>& callback,
- const sp<hardware::hidl_death_recipient>& deathHandler = nullptr);
+ const sp<hal::hidl_death_recipient>& deathHandler = nullptr);
// explicit destructor to unregister the death recipient
~ExecutionBurstController();
@@ -289,8 +292,9 @@
* - dynamic output shapes from the execution
* - any execution time measurements of the execution
*/
- std::tuple<ErrorStatus, std::vector<OutputShape>, Timing> compute(
- const Request& request, MeasureTiming measure, const std::vector<intptr_t>& memoryIds);
+ std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing> compute(
+ const hal::Request& request, hal::MeasureTiming measure,
+ const std::vector<intptr_t>& memoryIds);
// TODO: combine "compute" and "tryCompute" back into a single function.
// "tryCompute" was created later to return the "fallback" boolean. This
@@ -311,8 +315,9 @@
* - whether or not a failed burst execution should be re-run using a
* different path (e.g., IPreparedModel::executeSynchronously)
*/
- std::tuple<ErrorStatus, std::vector<OutputShape>, Timing, bool> tryCompute(
- const Request& request, MeasureTiming measure, const std::vector<intptr_t>& memoryIds);
+ std::tuple<hal::ErrorStatus, std::vector<hal::OutputShape>, hal::Timing, bool> tryCompute(
+ const hal::Request& request, hal::MeasureTiming measure,
+ const std::vector<intptr_t>& memoryIds);
/**
* Propagate a user's freeing of memory to the service.
@@ -325,9 +330,9 @@
std::mutex mMutex;
const std::shared_ptr<RequestChannelSender> mRequestChannelSender;
const std::shared_ptr<ResultChannelReceiver> mResultChannelReceiver;
- const sp<IBurstContext> mBurstContext;
+ const sp<hal::IBurstContext> mBurstContext;
const sp<ExecutionBurstCallback> mMemoryCache;
- const sp<hardware::hidl_death_recipient> mDeathHandler;
+ const sp<hal::hidl_death_recipient> mDeathHandler;
};
} // namespace android::nn
diff --git a/nn/common/include/ExecutionBurstServer.h b/nn/common/include/ExecutionBurstServer.h
index aeab862..f838012 100644
--- a/nn/common/include/ExecutionBurstServer.h
+++ b/nn/common/include/ExecutionBurstServer.h
@@ -31,9 +31,9 @@
namespace android::nn {
-using ::android::hardware::MQDescriptorSync;
-using FmqRequestDescriptor = MQDescriptorSync<FmqRequestDatum>;
-using FmqResultDescriptor = MQDescriptorSync<FmqResultDatum>;
+using hardware::MQDescriptorSync;
+using FmqRequestDescriptor = MQDescriptorSync<hal::FmqRequestDatum>;
+using FmqResultDescriptor = MQDescriptorSync<hal::FmqResultDatum>;
/**
* Function to serialize results.
@@ -45,8 +45,9 @@
* @param timing Timing information of the execution.
* @return Serialized FMQ result data.
*/
-std::vector<FmqResultDatum> serialize(ErrorStatus errorStatus,
- const std::vector<OutputShape>& outputShapes, Timing timing);
+std::vector<hal::FmqResultDatum> serialize(hal::ErrorStatus errorStatus,
+ const std::vector<hal::OutputShape>& outputShapes,
+ hal::Timing timing);
/**
* Deserialize the FMQ request data.
@@ -58,8 +59,8 @@
* @param data Serialized FMQ request data.
* @return Request object if successfully deserialized, std::nullopt otherwise.
*/
-std::optional<std::tuple<Request, std::vector<int32_t>, MeasureTiming>> deserialize(
- const std::vector<FmqRequestDatum>& data);
+std::optional<std::tuple<hal::Request, std::vector<int32_t>, hal::MeasureTiming>> deserialize(
+ const std::vector<hal::FmqRequestDatum>& data);
/**
* RequestChannelReceiver is responsible for waiting on the channel until the
@@ -72,7 +73,7 @@
*/
class RequestChannelReceiver {
using FmqRequestChannel =
- hardware::MessageQueue<FmqRequestDatum, hardware::kSynchronizedReadWrite>;
+ hardware::MessageQueue<hal::FmqRequestDatum, hardware::kSynchronizedReadWrite>;
public:
/**
@@ -96,7 +97,7 @@
* @return Request object if successfully received, std::nullopt if error or
* if the receiver object was invalidated.
*/
- std::optional<std::tuple<Request, std::vector<int32_t>, MeasureTiming>> getBlocking();
+ std::optional<std::tuple<hal::Request, std::vector<int32_t>, hal::MeasureTiming>> getBlocking();
/**
* Method to mark the channel as invalid, unblocking any current or future
@@ -107,7 +108,7 @@
RequestChannelReceiver(std::unique_ptr<FmqRequestChannel> fmqRequestChannel, bool blocking);
private:
- std::optional<std::vector<FmqRequestDatum>> getPacketBlocking();
+ std::optional<std::vector<hal::FmqRequestDatum>> getPacketBlocking();
const std::unique_ptr<FmqRequestChannel> mFmqRequestChannel;
std::atomic<bool> mTeardown{false};
@@ -121,7 +122,7 @@
*/
class ResultChannelSender {
using FmqResultChannel =
- hardware::MessageQueue<FmqResultDatum, hardware::kSynchronizedReadWrite>;
+ hardware::MessageQueue<hal::FmqResultDatum, hardware::kSynchronizedReadWrite>;
public:
/**
@@ -142,10 +143,11 @@
* @param timing Timing information of the execution.
* @return 'true' on successful send, 'false' otherwise.
*/
- bool send(ErrorStatus errorStatus, const std::vector<OutputShape>& outputShapes, Timing timing);
+ bool send(hal::ErrorStatus errorStatus, const std::vector<hal::OutputShape>& outputShapes,
+ hal::Timing timing);
// prefer calling ResultChannelSender::send
- bool sendPacket(const std::vector<FmqResultDatum>& packet);
+ bool sendPacket(const std::vector<hal::FmqResultDatum>& packet);
ResultChannelSender(std::unique_ptr<FmqResultChannel> fmqResultChannel, bool blocking);
@@ -159,7 +161,7 @@
* deserializing a request object from a FMQ, performing the inference, and
* serializing the result back across another FMQ.
*/
-class ExecutionBurstServer : public IBurstContext {
+class ExecutionBurstServer : public hal::IBurstContext {
DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutionBurstServer);
public:
@@ -199,7 +201,7 @@
* @param memory Memory resource to be cached.
* @param slot Slot identifier corresponding to the memory resource.
*/
- virtual void addCacheEntry(const hidl_memory& memory, int32_t slot) = 0;
+ virtual void addCacheEntry(const hal::hidl_memory& memory, int32_t slot) = 0;
/**
* Removes an entry specified by a slot from the cache.
@@ -224,9 +226,9 @@
* @return Result of the execution, including the status of the
* execution, dynamic output shapes, and any timing information.
*/
- virtual std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
- const Request& request, const std::vector<int32_t>& slots,
- MeasureTiming measure) = 0;
+ virtual std::tuple<hal::ErrorStatus, hal::hidl_vec<hal::OutputShape>, hal::Timing> execute(
+ const hal::Request& request, const std::vector<int32_t>& slots,
+ hal::MeasureTiming measure) = 0;
};
/**
@@ -248,7 +250,7 @@
* @result IBurstContext Handle to the burst context.
*/
static sp<ExecutionBurstServer> create(
- const sp<IBurstCallback>& callback, const FmqRequestDescriptor& requestChannel,
+ const sp<hal::IBurstCallback>& callback, const FmqRequestDescriptor& requestChannel,
const FmqResultDescriptor& resultChannel,
std::shared_ptr<IBurstExecutorWithCache> executorWithCache);
@@ -271,19 +273,19 @@
* execution.
* @result IBurstContext Handle to the burst context.
*/
- static sp<ExecutionBurstServer> create(const sp<IBurstCallback>& callback,
+ static sp<ExecutionBurstServer> create(const sp<hal::IBurstCallback>& callback,
const FmqRequestDescriptor& requestChannel,
const FmqResultDescriptor& resultChannel,
- IPreparedModel* preparedModel);
+ hal::IPreparedModel* preparedModel);
- ExecutionBurstServer(const sp<IBurstCallback>& callback,
+ ExecutionBurstServer(const sp<hal::IBurstCallback>& callback,
std::unique_ptr<RequestChannelReceiver> requestChannel,
std::unique_ptr<ResultChannelSender> resultChannel,
std::shared_ptr<IBurstExecutorWithCache> cachedExecutor);
~ExecutionBurstServer();
// Used by the NN runtime to preemptively remove any stored memory.
- Return<void> freeMemory(int32_t slot) override;
+ hal::Return<void> freeMemory(int32_t slot) override;
private:
// Ensures all cache entries contained in mExecutorWithCache are present in
@@ -300,7 +302,7 @@
std::thread mWorker;
std::mutex mMutex;
std::atomic<bool> mTeardown{false};
- const sp<IBurstCallback> mCallback;
+ const sp<hal::IBurstCallback> mCallback;
const std::unique_ptr<RequestChannelReceiver> mRequestChannelReceiver;
const std::unique_ptr<ResultChannelSender> mResultChannelSender;
const std::shared_ptr<IBurstExecutorWithCache> mExecutorWithCache;
diff --git a/nn/common/include/HalInterfaces.h b/nn/common/include/HalInterfaces.h
index 9040cc4..4421ae3 100644
--- a/nn/common/include/HalInterfaces.h
+++ b/nn/common/include/HalInterfaces.h
@@ -33,57 +33,64 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
-using ::android::sp;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_handle;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::neuralnetworks::V1_0::DataLocation;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
+namespace android::nn::hal {
+
+using android::sp;
+using hardware::hidl_array;
+using hardware::hidl_death_recipient;
+using hardware::hidl_enum_range;
+using hardware::hidl_handle;
+using hardware::hidl_memory;
+using hardware::hidl_string;
+using hardware::hidl_vec;
+using hardware::Return;
+using hardware::Void;
+using hardware::neuralnetworks::V1_0::DataLocation;
+using hardware::neuralnetworks::V1_0::DeviceStatus;
+using hardware::neuralnetworks::V1_0::ErrorStatus;
+using hardware::neuralnetworks::V1_0::FusedActivationFunc;
+using hardware::neuralnetworks::V1_0::OperandLifeTime;
+using hardware::neuralnetworks::V1_0::PerformanceInfo;
+using hardware::neuralnetworks::V1_0::Request;
+using hardware::neuralnetworks::V1_0::RequestArgument;
+using hardware::neuralnetworks::V1_1::ExecutionPreference;
+using hardware::neuralnetworks::V1_2::Capabilities;
+using hardware::neuralnetworks::V1_2::Constant;
+using hardware::neuralnetworks::V1_2::DeviceType;
+using hardware::neuralnetworks::V1_2::Extension;
+using hardware::neuralnetworks::V1_2::FmqRequestDatum;
+using hardware::neuralnetworks::V1_2::FmqResultDatum;
+using hardware::neuralnetworks::V1_2::IBurstCallback;
+using hardware::neuralnetworks::V1_2::IBurstContext;
+using hardware::neuralnetworks::V1_2::IDevice;
+using hardware::neuralnetworks::V1_2::IExecutionCallback;
+using hardware::neuralnetworks::V1_2::IPreparedModel;
+using hardware::neuralnetworks::V1_2::IPreparedModelCallback;
+using hardware::neuralnetworks::V1_2::MeasureTiming;
+using hardware::neuralnetworks::V1_2::Model;
+using hardware::neuralnetworks::V1_2::Operand;
+using hardware::neuralnetworks::V1_2::OperandType;
+using hardware::neuralnetworks::V1_2::OperandTypeRange;
+using hardware::neuralnetworks::V1_2::Operation;
+using hardware::neuralnetworks::V1_2::OperationType;
+using hardware::neuralnetworks::V1_2::OperationTypeRange;
+using hardware::neuralnetworks::V1_2::OutputShape;
+using hardware::neuralnetworks::V1_2::SymmPerChannelQuantParams;
+using hardware::neuralnetworks::V1_2::Timing;
+using hidl::allocator::V1_0::IAllocator;
+using hidl::memory::V1_0::IMemory;
+
+namespace V1_0 = hardware::neuralnetworks::V1_0;
+namespace V1_1 = hardware::neuralnetworks::V1_1;
+namespace V1_2 = hardware::neuralnetworks::V1_2;
+
+} // namespace android::nn::hal
+
+// TODO: remove after b/137663811 is addressed
+namespace android::nn {
+using namespace hal;
+}
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
-using ::android::hardware::neuralnetworks::V1_2::Capabilities;
-using ::android::hardware::neuralnetworks::V1_2::Constant;
-using ::android::hardware::neuralnetworks::V1_2::DeviceType;
-using ::android::hardware::neuralnetworks::V1_2::Extension;
-using ::android::hardware::neuralnetworks::V1_2::FmqRequestDatum;
-using ::android::hardware::neuralnetworks::V1_2::FmqResultDatum;
-using ::android::hardware::neuralnetworks::V1_2::IBurstCallback;
-using ::android::hardware::neuralnetworks::V1_2::IBurstContext;
-using ::android::hardware::neuralnetworks::V1_2::IDevice;
-using ::android::hardware::neuralnetworks::V1_2::IExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_2::IPreparedModelCallback;
-using ::android::hardware::neuralnetworks::V1_2::MeasureTiming;
-using ::android::hardware::neuralnetworks::V1_2::Model;
-using ::android::hardware::neuralnetworks::V1_2::Operand;
-using ::android::hardware::neuralnetworks::V1_2::OperandType;
-using ::android::hardware::neuralnetworks::V1_2::OperandTypeRange;
-using ::android::hardware::neuralnetworks::V1_2::Operation;
-using ::android::hardware::neuralnetworks::V1_2::OperationType;
-using ::android::hardware::neuralnetworks::V1_2::OperationTypeRange;
-using ::android::hardware::neuralnetworks::V1_2::OutputShape;
-using ::android::hardware::neuralnetworks::V1_2::SymmPerChannelQuantParams;
-using ::android::hardware::neuralnetworks::V1_2::Timing;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-
-namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
-namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
-namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
-
-namespace android {
-namespace nn {
-
-} // namespace nn
-} // namespace android
#endif // FRAMEWORKS_ML_COMMON_HAL_INTERFACES_H
diff --git a/nn/common/include/OperationResolver.h b/nn/common/include/OperationResolver.h
index c10427f..c569ca5 100644
--- a/nn/common/include/OperationResolver.h
+++ b/nn/common/include/OperationResolver.h
@@ -25,7 +25,7 @@
// Encapsulates an operation implementation.
struct OperationRegistration {
- OperationType type;
+ hal::OperationType type;
const char* name;
// Validates operand types, shapes, and any values known during graph creation.
@@ -47,7 +47,7 @@
bool allowZeroSizedInput = false;
} flags;
- OperationRegistration(OperationType type, const char* name,
+ OperationRegistration(hal::OperationType type, const char* name,
std::function<bool(const IOperationValidationContext*)> validate,
std::function<bool(IOperationExecutionContext*)> prepare,
std::function<bool(IOperationExecutionContext*)> execute, Flag flags)
@@ -62,7 +62,7 @@
// A registry of operation implementations.
class IOperationResolver {
public:
- virtual const OperationRegistration* findOperation(OperationType operationType) const = 0;
+ virtual const OperationRegistration* findOperation(hal::OperationType operationType) const = 0;
virtual ~IOperationResolver() {}
};
@@ -86,7 +86,7 @@
return &instance;
}
- const OperationRegistration* findOperation(OperationType operationType) const override;
+ const OperationRegistration* findOperation(hal::OperationType operationType) const override;
private:
BuiltinOperationResolver();
@@ -116,11 +116,11 @@
// .allowZeroSizedInput = true);
//
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
-#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
- const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(OperationType::identifier, operationName, \
- validate, prepare, execute, {__VA_ARGS__}); \
- return &registration; \
+#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
+ const OperationRegistration* register_##identifier() { \
+ static OperationRegistration registration(hal::OperationType::identifier, operationName, \
+ validate, prepare, execute, {__VA_ARGS__}); \
+ return &registration; \
}
#else
// This version ignores CPU execution logic (prepare and execute).
@@ -129,7 +129,7 @@
#define NN_REGISTER_OPERATION(identifier, operationName, validate, unused_prepare, unused_execute, \
...) \
const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(OperationType::identifier, operationName, \
+ static OperationRegistration registration(hal::OperationType::identifier, operationName, \
validate, nullptr, nullptr, {__VA_ARGS__}); \
return &registration; \
}
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index 4111a2c..fb787ef 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -17,6 +17,7 @@
#ifndef FRAMEWORKS_ML_COMMON_OPERATIONS_UTILS_H
#define FRAMEWORKS_ML_COMMON_OPERATIONS_UTILS_H
+#include "HalInterfaces.h"
#include "Utils.h"
#include <cstdint>
@@ -43,11 +44,11 @@
// Stores operand type information. "Shape" is a historical name.
struct Shape {
- OperandType type;
+ hal::OperandType type;
std::vector<uint32_t> dimensions;
float scale;
int32_t offset;
- Operand::ExtraParams extraParams;
+ hal::Operand::ExtraParams extraParams;
};
// Provides information available during graph creation to validate an operation.
@@ -72,12 +73,12 @@
virtual HalVersion getHalVersion() const = 0;
virtual uint32_t getNumInputs() const = 0;
- virtual OperandType getInputType(uint32_t index) const = 0;
+ virtual hal::OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
- virtual const Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const hal::Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual OperandType getOutputType(uint32_t index) const = 0;
+ virtual hal::OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
};
@@ -87,13 +88,13 @@
virtual ~IOperationExecutionContext() {}
virtual uint32_t getNumInputs() const = 0;
- virtual OperandType getInputType(uint32_t index) const = 0;
+ virtual hal::OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
virtual const void* getInputBuffer(uint32_t index) const = 0;
- virtual const Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const hal::Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual OperandType getOutputType(uint32_t index) const = 0;
+ virtual hal::OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
virtual void* getOutputBuffer(uint32_t index) = 0;
@@ -121,11 +122,11 @@
// Verifies that the number and types of operation inputs are as expected.
bool validateInputTypes(const IOperationValidationContext* context,
- const std::vector<OperandType>& expectedTypes);
+ const std::vector<hal::OperandType>& expectedTypes);
// Verifies that the number and types of operation outputs are as expected.
bool validateOutputTypes(const IOperationValidationContext* context,
- const std::vector<OperandType>& expectedTypes);
+ const std::vector<hal::OperandType>& expectedTypes);
// Verifies that the HAL version specified in the context is greater or equal
// than the minimal supported HAL version.
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index 80ea884..63f6162 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -160,25 +160,27 @@
// Return a vector with one entry for each non extension OperandType, set to the
// specified PerformanceInfo value. The vector will be sorted by OperandType.
-hidl_vec<Capabilities::OperandPerformance> nonExtensionOperandPerformance(PerformanceInfo perf);
+hal::hidl_vec<hal::Capabilities::OperandPerformance> nonExtensionOperandPerformance(
+ hal::PerformanceInfo perf);
// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value. The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
-void update(hidl_vec<Capabilities::OperandPerformance>* operandPerformance, OperandType type,
- PerformanceInfo perf);
+void update(hal::hidl_vec<hal::Capabilities::OperandPerformance>* operandPerformance,
+ hal::OperandType type, hal::PerformanceInfo perf);
// Look for a vector entry corresponding to the specified OperandType. If
// found, return the associated PerformanceInfo. If not, return a pessimistic
// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
-PerformanceInfo lookup(const hidl_vec<Capabilities::OperandPerformance>& operandPerformance,
- OperandType type);
+hal::PerformanceInfo lookup(
+ const hal::hidl_vec<hal::Capabilities::OperandPerformance>& operandPerformance,
+ hal::OperandType type);
// Returns true if an operand type is an extension type.
-bool isExtensionOperandType(OperandType type);
+bool isExtensionOperandType(hal::OperandType type);
// Returns true if an operation type is an extension type.
-bool isExtensionOperationType(OperationType type);
+bool isExtensionOperationType(hal::OperationType type);
// Returns the amount of space needed to store a value of the specified
// dimensions and type. For a tensor with unspecified rank or at least one
@@ -187,7 +189,8 @@
// Aborts if the specified type is an extension type.
//
// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
-uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
+uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
+ const std::vector<uint32_t>& dimensions);
// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
@@ -196,7 +199,7 @@
// Aborts if the specified type is an extension type.
//
// See also TypeManager::getSizeOfData(const Operand&).
-inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
+inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) {
return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}
@@ -208,16 +211,16 @@
bool nonExtensionOperandTypeIsScalar(int type);
// Returns the name of the operation type in ASCII.
-std::string getOperationName(OperationType opCode);
+std::string getOperationName(hal::OperationType opCode);
// Returns the name of the operand type in ASCII.
-std::string getOperandTypeName(OperandType type);
+std::string getOperandTypeName(hal::OperandType type);
// Whether an operand of tensor type has unspecified dimensions.
//
// Undefined behavior if the operand type is a scalar type.
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
-bool tensorHasUnspecifiedDimensions(const Operand& operand);
+bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
// Returns the number of padding bytes needed to align data of the
@@ -230,9 +233,9 @@
uint32_t alignBytesNeeded(uint32_t index, size_t length);
// Does a detailed LOG(INFO) of the model
-void logModelToInfo(const V1_0::Model& model);
-void logModelToInfo(const V1_1::Model& model);
-void logModelToInfo(const V1_2::Model& model);
+void logModelToInfo(const hal::V1_0::Model& model);
+void logModelToInfo(const hal::V1_1::Model& model);
+void logModelToInfo(const hal::V1_2::Model& model);
inline std::string toString(uint32_t obj) {
return std::to_string(obj);
@@ -265,17 +268,18 @@
}
bool validateOperandSymmPerChannelQuantParams(
- const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
- const char* tag);
+ const hal::Operand& halOperand,
+ const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
// Validates an operand type.
//
// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
//
// If allowPartial is true, the dimensions may be underspecified.
-int validateOperandType(const ANeuralNetworksOperandType& type,
- const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
- const char* tag, bool allowPartial);
+int validateOperandType(
+ const ANeuralNetworksOperandType& type,
+ const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+ const char* tag, bool allowPartial);
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
const char* tag);
@@ -283,7 +287,7 @@
// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
- const uint32_t* outputIndexes, const std::vector<Operand>& operands,
+ const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands,
HalVersion halVersion);
inline size_t getSizeFromInts(int lower, int higher) {
@@ -292,25 +296,25 @@
// Convert ANEURALNETWORKS_* result code to ErrorStatus.
// Not guaranteed to be a 1-to-1 mapping.
-ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode);
// Convert ErrorStatus to ANEURALNETWORKS_* result code.
// Not guaranteed to be a 1-to-1 mapping.
-int convertErrorStatusToResultCode(ErrorStatus status);
+int convertErrorStatusToResultCode(hal::ErrorStatus status);
// Versioning
-bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
+bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
+bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
+bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
+bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
+bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
+bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
+bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_2::Operand& operand);
+bool compliantWithV1_0(const hal::V1_2::Operand& operand);
// If noncompliantOperations != nullptr, then
// precondition: noncompliantOperations->empty()
@@ -318,34 +322,34 @@
// operations; if the compliance check fails for some reason
// other than a noncompliant operation,
// *noncompliantOperations consists of the indices of all operations
-bool compliantWithV1_0(const V1_0::Model& model);
-bool compliantWithV1_0(const V1_1::Model& model);
-bool compliantWithV1_0(const V1_2::Model& model,
+bool compliantWithV1_0(const hal::V1_0::Model& model);
+bool compliantWithV1_0(const hal::V1_1::Model& model);
+bool compliantWithV1_0(const hal::V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const V1_0::Model& model);
-bool compliantWithV1_1(const V1_1::Model& model);
-bool compliantWithV1_1(const V1_2::Model& model,
+bool compliantWithV1_1(const hal::V1_0::Model& model);
+bool compliantWithV1_1(const hal::V1_1::Model& model);
+bool compliantWithV1_1(const hal::V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
-V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
-V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
+hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
+hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
+hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
+hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
+hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
+hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
+hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
+hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
+hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
-V1_0::Model convertToV1_0(const V1_0::Model& model);
-V1_0::Model convertToV1_0(const V1_1::Model& model);
-V1_0::Model convertToV1_0(const V1_2::Model& model);
-V1_1::Model convertToV1_1(const V1_0::Model& model);
-V1_1::Model convertToV1_1(const V1_1::Model& model);
-V1_1::Model convertToV1_1(const V1_2::Model& model);
-V1_2::Model convertToV1_2(const V1_0::Model& model);
-V1_2::Model convertToV1_2(const V1_1::Model& model);
-V1_2::Model convertToV1_2(const V1_2::Model& model);
+hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
+hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
+hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
+hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
+hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
+hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
+hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
+hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
+hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
// The IModelSlicer abstract class provides methods to create from an original
// model a "slice" of that model consisting of the subset of operations that is
@@ -381,24 +385,24 @@
//
class IModelSlicer {
public:
- virtual std::optional<std::pair<V1_0::Model, std::function<uint32_t(uint32_t)>>>
+ virtual std::optional<std::pair<hal::V1_0::Model, std::function<uint32_t(uint32_t)>>>
getSliceV1_0() = 0;
- virtual std::optional<std::pair<V1_1::Model, std::function<uint32_t(uint32_t)>>>
+ virtual std::optional<std::pair<hal::V1_1::Model, std::function<uint32_t(uint32_t)>>>
getSliceV1_1() = 0;
virtual ~IModelSlicer() = default;
};
-V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type);
-V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type);
+hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_2::OperationType type);
+hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_2::OperationType type);
-V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
+hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
-V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
-V1_2::Operand convertToV1_2(const V1_2::Operand& operand);
+hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
+hal::V1_2::Operand convertToV1_2(const hal::V1_2::Operand& operand);
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands);
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands);
+hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
+hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
#ifdef NN_DEBUGGABLE
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
diff --git a/nn/common/include/ValidateHal.h b/nn/common/include/ValidateHal.h
index 3bb641c..30e79ac 100644
--- a/nn/common/include/ValidateHal.h
+++ b/nn/common/include/ValidateHal.h
@@ -44,20 +44,20 @@
// are correctly defined, as these are specific to each implementation.
// Each driver should do their own validation of OEM types.
template <class T_Model>
-bool validateRequest(const Request& request, const T_Model& model);
+bool validateRequest(const hal::Request& request, const T_Model& model);
// Verifies that the execution preference is valid.
-bool validateExecutionPreference(ExecutionPreference preference);
+bool validateExecutionPreference(hal::ExecutionPreference preference);
-bool validOperationType(V1_0::OperationType operation);
-bool validOperationType(V1_1::OperationType operation);
-bool validOperationType(V1_2::OperationType operation);
+bool validOperationType(hal::V1_0::OperationType operation);
+bool validOperationType(hal::V1_1::OperationType operation);
+bool validOperationType(hal::V1_2::OperationType operation);
-bool validOperandType(V1_0::OperandType operand);
-bool validOperandType(V1_2::OperandType operand);
+bool validOperandType(hal::V1_0::OperandType operand);
+bool validOperandType(hal::V1_2::OperandType operand);
// Verifies that the memory pool is valid in the specified HAL version.
-bool validatePool(const hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
+bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
} // namespace nn
} // namespace android
diff --git a/nn/common/operations/Activation.cpp b/nn/common/operations/Activation.cpp
index 31ce03c..f320d99 100644
--- a/nn/common/operations/Activation.cpp
+++ b/nn/common/operations/Activation.cpp
@@ -18,6 +18,7 @@
#include "ActivationFunctor.h"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -28,6 +29,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
namespace activation {
constexpr uint32_t kNumInputs = 1;
diff --git a/nn/common/operations/ArgMinMax.cpp b/nn/common/operations/ArgMinMax.cpp
index b323d51..64d4606 100644
--- a/nn/common/operations/ArgMinMax.cpp
+++ b/nn/common/operations/ArgMinMax.cpp
@@ -18,14 +18,17 @@
#define LOG_TAG "Operations"
-#include "Operations.h"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
+#include "Operations.h"
#include "Tracing.h"
namespace android {
namespace nn {
+using namespace hal;
+
template <typename In, typename Out>
static void argMinMaxImpl(const In* inputData, const Shape& inputShape,
int32_t axis, bool isArgMin,
diff --git a/nn/common/operations/BidirectionalSequenceLSTM.cpp b/nn/common/operations/BidirectionalSequenceLSTM.cpp
index c7391da..a186b72 100644
--- a/nn/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/nn/common/operations/BidirectionalSequenceLSTM.cpp
@@ -30,6 +30,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/BidirectionalSequenceLSTM.h b/nn/common/operations/BidirectionalSequenceLSTM.h
index 06c9b03..df0a5be 100644
--- a/nn/common/operations/BidirectionalSequenceLSTM.h
+++ b/nn/common/operations/BidirectionalSequenceLSTM.h
@@ -33,11 +33,12 @@
class BidirectionalSequenceLSTM {
public:
- BidirectionalSequenceLSTM(const Operation& operation,
+ BidirectionalSequenceLSTM(const hardware::neuralnetworks::V1_2::Operation& operation,
std::vector<RunTimeOperandInfo>& operands);
- bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
- Shape* fwOutputShape, Shape* bwOutputShape);
+ bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, Shape* fwOutputShape,
+ Shape* bwOutputShape);
bool Eval();
// Input Tensors of size {max_time, n_batch, n_input}
diff --git a/nn/common/operations/BidirectionalSequenceRNN.cpp b/nn/common/operations/BidirectionalSequenceRNN.cpp
index 32ab00f..1e37f16 100644
--- a/nn/common/operations/BidirectionalSequenceRNN.cpp
+++ b/nn/common/operations/BidirectionalSequenceRNN.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "Operations"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
@@ -49,6 +50,8 @@
namespace {
+using namespace hal;
+
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
diff --git a/nn/common/operations/Broadcast.cpp b/nn/common/operations/Broadcast.cpp
index 9ff6ae8..1323254 100644
--- a/nn/common/operations/Broadcast.cpp
+++ b/nn/common/operations/Broadcast.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -30,6 +31,9 @@
namespace android {
namespace nn {
+
+using namespace hal;
+
namespace broadcast {
constexpr uint32_t kNumInputs = 3;
diff --git a/nn/common/operations/Cast.cpp b/nn/common/operations/Cast.cpp
index f569767..b14c8b0 100644
--- a/nn/common/operations/Cast.cpp
+++ b/nn/common/operations/Cast.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "Cast.h"
+#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
@@ -25,6 +26,8 @@
namespace {
+using namespace hal;
+
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int numElements) {
std::transform(in, in + numElements, out, [](FromT a) -> ToT {
diff --git a/nn/common/operations/ChannelShuffle.cpp b/nn/common/operations/ChannelShuffle.cpp
index 997b033..65f45ed 100644
--- a/nn/common/operations/ChannelShuffle.cpp
+++ b/nn/common/operations/ChannelShuffle.cpp
@@ -25,6 +25,8 @@
namespace nn {
namespace channel_shuffle {
+using namespace hal;
+
constexpr char kOperationName[] = "CHANNEL_SHUFFLE";
constexpr uint32_t kNumInputs = 3;
diff --git a/nn/common/operations/Comparisons.cpp b/nn/common/operations/Comparisons.cpp
index 1820dde..0ccecaf 100644
--- a/nn/common/operations/Comparisons.cpp
+++ b/nn/common/operations/Comparisons.cpp
@@ -34,6 +34,8 @@
namespace {
+using namespace hal;
+
template <typename DataType, typename ComparisonType>
bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,
const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData,
diff --git a/nn/common/operations/Concatenation.cpp b/nn/common/operations/Concatenation.cpp
index 04bb2e2..e8b332b 100644
--- a/nn/common/operations/Concatenation.cpp
+++ b/nn/common/operations/Concatenation.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -35,6 +36,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool concatenation(const std::vector<const T*>& inputDataPtrs,
const std::vector<Shape>& inputShapes, int32_t axis, T* outputData,
diff --git a/nn/common/operations/Conv2D.cpp b/nn/common/operations/Conv2D.cpp
index 0debed2..678e2d6 100644
--- a/nn/common/operations/Conv2D.cpp
+++ b/nn/common/operations/Conv2D.cpp
@@ -17,10 +17,11 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
-#include "Utils.h"
#include "Tracing.h"
+#include "Utils.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -39,6 +40,8 @@
namespace {
+using namespace hal;
+
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
diff --git a/nn/common/operations/Dequantize.cpp b/nn/common/operations/Dequantize.cpp
index 51403d2..85c071d 100644
--- a/nn/common/operations/Dequantize.cpp
+++ b/nn/common/operations/Dequantize.cpp
@@ -33,6 +33,8 @@
namespace {
+using namespace hal;
+
template <typename InputType, typename OutputType>
bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) {
const int numElements = getNumberOfElements(inputShape);
diff --git a/nn/common/operations/Elementwise.cpp b/nn/common/operations/Elementwise.cpp
index 114007d..3610cc0 100644
--- a/nn/common/operations/Elementwise.cpp
+++ b/nn/common/operations/Elementwise.cpp
@@ -35,6 +35,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline bool compute(float func(float), const T* input, const Shape& shape, T* output) {
const auto size = getNumberOfElements(shape);
diff --git a/nn/common/operations/EmbeddingLookup.cpp b/nn/common/operations/EmbeddingLookup.cpp
index d705d95..f3b2911 100644
--- a/nn/common/operations/EmbeddingLookup.cpp
+++ b/nn/common/operations/EmbeddingLookup.cpp
@@ -19,6 +19,7 @@
#include "EmbeddingLookup.h"
#include "CpuExecutor.h"
+#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -26,6 +27,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
EmbeddingLookup::EmbeddingLookup(const Operation& operation,
std::vector<RunTimeOperandInfo>& operands) {
value_ = GetInput(operation, operands, kValueTensor);
diff --git a/nn/common/operations/EmbeddingLookup.h b/nn/common/operations/EmbeddingLookup.h
index cc7cecc..ab3c38c 100644
--- a/nn/common/operations/EmbeddingLookup.h
+++ b/nn/common/operations/EmbeddingLookup.h
@@ -28,9 +28,8 @@
class EmbeddingLookup {
public:
- EmbeddingLookup(
- const Operation &operation,
- std::vector<RunTimeOperandInfo> &operands);
+ EmbeddingLookup(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
bool Eval();
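Note the asymmetry with the .cpp changes: headers spell out hardware::neuralnetworks::V1_2::Operation in full rather than adding `using namespace hal`, because a using-directive in a header would leak into every translation unit that includes it. A contrived sketch of the rule, assuming HalOperation.h's forward declaration is in scope; ExampleOp is hypothetical:

    // In the header: fully qualified, no using-directive.
    namespace android::nn {
    class ExampleOp {
       public:
        explicit ExampleOp(const hardware::neuralnetworks::V1_2::Operation& operation);
    };
    }  // namespace android::nn

    // In the matching .cpp, the directive stays private to the translation unit:
    //     namespace android::nn { using namespace hal; ... }

The same rewrite is applied to the other legacy operation headers below.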
diff --git a/nn/common/operations/FullyConnected.cpp b/nn/common/operations/FullyConnected.cpp
index 509a70e..3dc7478 100644
--- a/nn/common/operations/FullyConnected.cpp
+++ b/nn/common/operations/FullyConnected.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -41,6 +42,8 @@
namespace {
+using namespace hal;
+
// executionMutex is used to protect concurrent access to non-threadsafe resources
// like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
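The comment above is why several of these files keep a function-local lock. A minimal sketch of the pattern it describes; FakeGemmContext is a placeholder standing in for gemmlowp::GemmContext, which is not threadsafe:

    #include <mutex>

    struct FakeGemmContext { int scratch = 0; };  // placeholder for gemmlowp::GemmContext

    void runGemm(FakeGemmContext* context) {
        static std::mutex executionMutex;              // one lock per process
        std::lock_guard<std::mutex> lock(executionMutex);
        context->scratch += 1;                         // non-threadsafe work, now serialized
    }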
diff --git a/nn/common/operations/Gather.cpp b/nn/common/operations/Gather.cpp
index e41947c..53d0671 100644
--- a/nn/common/operations/Gather.cpp
+++ b/nn/common/operations/Gather.cpp
@@ -37,6 +37,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
diff --git a/nn/common/operations/GenerateProposals.cpp b/nn/common/operations/GenerateProposals.cpp
index fd28a74..67d614f 100644
--- a/nn/common/operations/GenerateProposals.cpp
+++ b/nn/common/operations/GenerateProposals.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -32,6 +33,8 @@
namespace {
+using namespace hal;
+
struct BoxEncodingCorner {
float x1, y1, x2, y2;
};
diff --git a/nn/common/operations/HalOperation.h b/nn/common/operations/HalOperation.h
index f74fce6..d4011e3 100644
--- a/nn/common/operations/HalOperation.h
+++ b/nn/common/operations/HalOperation.h
@@ -17,21 +17,13 @@
#ifndef FRAMEWORKS_ML_COMMON_OPERATIONS_HAL_OPERATION_H
#define FRAMEWORKS_ML_COMMON_OPERATIONS_HAL_OPERATION_H
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
+namespace android::hardware::neuralnetworks::V1_2 {
// Individual operation implementations should not depend on the HAL interface,
// but we have some that do. We use a forward declaration instead of an explicit
// blueprint dependency to hide this fact.
struct Operation;
-} // namespace V1_2
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
-
-using ::android::hardware::neuralnetworks::V1_2::Operation;
+} // namespace android::hardware::neuralnetworks::V1_2
#endif // FRAMEWORKS_ML_COMMON_OPERATIONS_HAL_OPERATION_H
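Two things happen in this header: the nested namespace blocks collapse into a single C++17 nested-namespace definition, and the global-scope using-declaration for Operation — exactly the kind of pollution this CL removes — is deleted, leaving only the forward declaration. The two spellings declare the same entity:

    // Pre-C++17 form (removed):
    namespace android { namespace hardware { namespace neuralnetworks { namespace V1_2 {
    struct Operation;
    }}}}

    // C++17 nested-namespace definition (added):
    namespace android::hardware::neuralnetworks::V1_2 {
    struct Operation;
    }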
diff --git a/nn/common/operations/HashtableLookup.cpp b/nn/common/operations/HashtableLookup.cpp
index d718e8c..67cdffd 100644
--- a/nn/common/operations/HashtableLookup.cpp
+++ b/nn/common/operations/HashtableLookup.cpp
@@ -19,6 +19,7 @@
#include "HashtableLookup.h"
#include "CpuExecutor.h"
+#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -28,6 +29,8 @@
namespace {
+using namespace hal;
+
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
diff --git a/nn/common/operations/HashtableLookup.h b/nn/common/operations/HashtableLookup.h
index 3f84905..8e12929 100644
--- a/nn/common/operations/HashtableLookup.h
+++ b/nn/common/operations/HashtableLookup.h
@@ -28,9 +28,8 @@
class HashtableLookup {
public:
- HashtableLookup(
- const Operation &operation,
- std::vector<RunTimeOperandInfo> &operands);
+ HashtableLookup(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
bool Eval();
diff --git a/nn/common/operations/HeatmapMaxKeypoint.cpp b/nn/common/operations/HeatmapMaxKeypoint.cpp
index 65a4afa..14fda45 100644
--- a/nn/common/operations/HeatmapMaxKeypoint.cpp
+++ b/nn/common/operations/HeatmapMaxKeypoint.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -42,6 +43,8 @@
namespace {
+using namespace hal;
+
// This function uses Taylor expansion up to the quadratic term to approximate the
// bicubic upscaling result.
// 2nd order Taylor expansion: D(x) = D - b'x + 1/2 * x'Ax
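Completing the math in the comment: differentiating D(x) = D - b'x + 1/2 * x'Ax and setting the gradient to zero gives -b + Ax = 0, so the sub-pixel refinement is x* = A^{-1} b, where b is the (negated) gradient and A the Hessian sampled at the peak — the sign convention matching the -b'x term above.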
diff --git a/nn/common/operations/InstanceNormalization.cpp b/nn/common/operations/InstanceNormalization.cpp
index c61da09..19d1c5f 100644
--- a/nn/common/operations/InstanceNormalization.cpp
+++ b/nn/common/operations/InstanceNormalization.cpp
@@ -42,6 +42,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta,
T epsilon, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/L2Normalization.cpp b/nn/common/operations/L2Normalization.cpp
index 566dace..92cabc7 100644
--- a/nn/common/operations/L2Normalization.cpp
+++ b/nn/common/operations/L2Normalization.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
@@ -38,6 +39,8 @@
namespace {
+using namespace hal;
+
inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("l2normFloat32");
diff --git a/nn/common/operations/LSHProjection.cpp b/nn/common/operations/LSHProjection.cpp
index 742e887..3735717 100644
--- a/nn/common/operations/LSHProjection.cpp
+++ b/nn/common/operations/LSHProjection.cpp
@@ -19,6 +19,7 @@
#include "LSHProjection.h"
#include "CpuExecutor.h"
+#include "HalInterfaces.h"
#include "Tracing.h"
#include "Utils.h"
@@ -27,6 +28,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
LSHProjection::LSHProjection(const Operation& operation,
std::vector<RunTimeOperandInfo>& operands) {
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/nn/common/operations/LSHProjection.h b/nn/common/operations/LSHProjection.h
index 2ebf916..9155aaa 100644
--- a/nn/common/operations/LSHProjection.h
+++ b/nn/common/operations/LSHProjection.h
@@ -36,10 +36,11 @@
class LSHProjection {
public:
- LSHProjection(const Operation& operation, std::vector<RunTimeOperandInfo>& operands);
+ LSHProjection(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
- Shape* outputShape);
+ static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, Shape* outputShape);
template <typename T>
bool Eval();
diff --git a/nn/common/operations/LSTM.cpp b/nn/common/operations/LSTM.cpp
index a772274..3dba298 100644
--- a/nn/common/operations/LSTM.cpp
+++ b/nn/common/operations/LSTM.cpp
@@ -31,6 +31,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/LSTM.h b/nn/common/operations/LSTM.h
index 606038f..9672d00 100644
--- a/nn/common/operations/LSTM.h
+++ b/nn/common/operations/LSTM.h
@@ -45,11 +45,12 @@
class LSTMCell {
public:
- LSTMCell(const Operation& operation, std::vector<RunTimeOperandInfo>& operands);
+ LSTMCell(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
- bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
- Shape* scratchShape, Shape* outputStateShape, Shape* cellStateShape,
- Shape* outputShape);
+ bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, Shape* scratchShape,
+ Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape);
bool Eval();
// Input Tensors of size {n_batch, n_input}
diff --git a/nn/common/operations/LogSoftmax.cpp b/nn/common/operations/LogSoftmax.cpp
index d0a5cb9..4132ef9 100644
--- a/nn/common/operations/LogSoftmax.cpp
+++ b/nn/common/operations/LogSoftmax.cpp
@@ -27,6 +27,8 @@
namespace nn {
namespace log_softmax {
+using namespace hal;
+
constexpr char kOperationName[] = "LOG_SOFTMAX";
constexpr uint32_t kNumInputs = 3;
diff --git a/nn/common/operations/LogicalAndOr.cpp b/nn/common/operations/LogicalAndOr.cpp
index 9d4968d..6ada724 100644
--- a/nn/common/operations/LogicalAndOr.cpp
+++ b/nn/common/operations/LogicalAndOr.cpp
@@ -34,6 +34,8 @@
namespace {
+using namespace hal;
+
bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape,
const bool8* bData, const Shape& bShape, bool8* outputData, const Shape& outputShape) {
IndexedShapeWrapper aShapeIndexed(aShape);
diff --git a/nn/common/operations/LogicalNot.cpp b/nn/common/operations/LogicalNot.cpp
index c715388..8b41813 100644
--- a/nn/common/operations/LogicalNot.cpp
+++ b/nn/common/operations/LogicalNot.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "Operations"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -31,6 +32,8 @@
namespace {
+using namespace hal;
+
bool compute(const bool8* input, const Shape& shape, bool8* output) {
const auto size = getNumberOfElements(shape);
for (uint32_t i = 0; i < size; ++i) {
diff --git a/nn/common/operations/MaximumMinimum.cpp b/nn/common/operations/MaximumMinimum.cpp
index d56e311..bd2c11d 100644
--- a/nn/common/operations/MaximumMinimum.cpp
+++ b/nn/common/operations/MaximumMinimum.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "MaximumMinimum.h"
+#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -27,6 +28,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool evalGeneric(const T* aData, const Shape& aShape, const T* bData, const Shape& bShape,
bool isMinimum, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/Multinomial.cpp b/nn/common/operations/Multinomial.cpp
index 797063d..9ccb389 100644
--- a/nn/common/operations/Multinomial.cpp
+++ b/nn/common/operations/Multinomial.cpp
@@ -34,6 +34,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/Multinomial.h b/nn/common/operations/Multinomial.h
index c12f5b3..1cb7382 100644
--- a/nn/common/operations/Multinomial.h
+++ b/nn/common/operations/Multinomial.h
@@ -32,7 +32,7 @@
class Multinomial {
public:
- Multinomial(const android::hardware::neuralnetworks::V1_2::Operation& operation,
+ Multinomial(const hardware::neuralnetworks::V1_2::Operation& operation,
std::vector<RunTimeOperandInfo>& operands);
static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
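Here only the redundant leading android:: is dropped: Multinomial.h already sits inside namespace android, so the shorter qualification names the same type (QuantizedLSTM.h below gets the identical trim). A compile-time sketch of the equivalence:

    #include <type_traits>

    namespace android::hardware::neuralnetworks::V1_2 { struct Operation; }

    namespace android::nn {
    using OpFull = ::android::hardware::neuralnetworks::V1_2::Operation;
    using OpShort = hardware::neuralnetworks::V1_2::Operation;  // resolves inside android::
    static_assert(std::is_same_v<OpFull, OpShort>);
    }  // namespace android::nn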
diff --git a/nn/common/operations/Neg.cpp b/nn/common/operations/Neg.cpp
index 7c61028..48d962c 100644
--- a/nn/common/operations/Neg.cpp
+++ b/nn/common/operations/Neg.cpp
@@ -37,6 +37,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline bool compute(const T* input, const Shape& shape, T* output) {
const auto size = getNumberOfElements(shape);
diff --git a/nn/common/operations/PRelu.cpp b/nn/common/operations/PRelu.cpp
index cd7f081..98491f6 100644
--- a/nn/common/operations/PRelu.cpp
+++ b/nn/common/operations/PRelu.cpp
@@ -28,6 +28,8 @@
namespace nn {
namespace prelu {
+using namespace hal;
+
constexpr char kOperationName[] = "PRELU";
constexpr uint32_t kNumInputs = 2;
diff --git a/nn/common/operations/Pooling.cpp b/nn/common/operations/Pooling.cpp
index 3e3b781..a32be7c 100644
--- a/nn/common/operations/Pooling.cpp
+++ b/nn/common/operations/Pooling.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
@@ -25,6 +26,9 @@
namespace android {
namespace nn {
+
+using namespace hal;
+
namespace pooling {
constexpr uint32_t kInputTensor = 0;
diff --git a/nn/common/operations/Pow.cpp b/nn/common/operations/Pow.cpp
index 99e1099..40c4adf 100644
--- a/nn/common/operations/Pow.cpp
+++ b/nn/common/operations/Pow.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "Pow.h"
+#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
@@ -28,6 +29,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool evalGeneric(const T* baseData, const Shape& baseShape, const T* exponentData,
const Shape& exponentShape, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/Quantize.cpp b/nn/common/operations/Quantize.cpp
index b2ec9e3..368492a 100644
--- a/nn/common/operations/Quantize.cpp
+++ b/nn/common/operations/Quantize.cpp
@@ -36,6 +36,8 @@
namespace {
+using namespace hal;
+
bool quantizeFloat32ToQuant8(const float* inputData, uint8_t* outputData,
const Shape& outputShape) {
NNTRACE_COMP("quantizeFloat32ToQuant8");
diff --git a/nn/common/operations/QuantizedLSTM.cpp b/nn/common/operations/QuantizedLSTM.cpp
index 62060f9..7b9be03 100644
--- a/nn/common/operations/QuantizedLSTM.cpp
+++ b/nn/common/operations/QuantizedLSTM.cpp
@@ -20,6 +20,7 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "Tracing.h"
@@ -31,6 +32,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/QuantizedLSTM.h b/nn/common/operations/QuantizedLSTM.h
index 9ea479f..5131416 100644
--- a/nn/common/operations/QuantizedLSTM.h
+++ b/nn/common/operations/QuantizedLSTM.h
@@ -13,10 +13,10 @@
class QuantizedLSTMCell {
public:
- QuantizedLSTMCell(const android::hardware::neuralnetworks::V1_2::Operation& operation,
+ QuantizedLSTMCell(const hardware::neuralnetworks::V1_2::Operation& operation,
std::vector<RunTimeOperandInfo>& operands);
- static bool prepare(const android::hardware::neuralnetworks::V1_2::Operation& operation,
+ static bool prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
std::vector<RunTimeOperandInfo>& operands, Shape* cellStateShape,
Shape* outputShape);
bool eval();
diff --git a/nn/common/operations/RNN.cpp b/nn/common/operations/RNN.cpp
index 2394961..dcb5928 100644
--- a/nn/common/operations/RNN.cpp
+++ b/nn/common/operations/RNN.cpp
@@ -27,6 +27,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
RNN::RNN(const Operation& operation,
std::vector<RunTimeOperandInfo>& operands) {
NNTRACE_TRANS("RNN::RNN");
diff --git a/nn/common/operations/RNN.h b/nn/common/operations/RNN.h
index b09f185..c0dbcc5 100644
--- a/nn/common/operations/RNN.h
+++ b/nn/common/operations/RNN.h
@@ -28,10 +28,12 @@
class RNN {
public:
- RNN(const Operation& operation, std::vector<RunTimeOperandInfo>& operands);
+ RNN(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
- Shape* hiddenStateShape, Shape* outputShape);
+ static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, Shape* hiddenStateShape,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/nn/common/operations/Reduce.cpp b/nn/common/operations/Reduce.cpp
index faa205e..3aa530c 100644
--- a/nn/common/operations/Reduce.cpp
+++ b/nn/common/operations/Reduce.cpp
@@ -42,6 +42,8 @@
namespace {
+using namespace hal;
+
template <typename T>
inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {
const Shape inputShape = context->getInputShape(kInputTensor);
diff --git a/nn/common/operations/ResizeImageOps.cpp b/nn/common/operations/ResizeImageOps.cpp
index 453bf84..34ad208 100644
--- a/nn/common/operations/ResizeImageOps.cpp
+++ b/nn/common/operations/ResizeImageOps.cpp
@@ -29,6 +29,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
namespace resize_image {
constexpr uint32_t kNumInputs = 4;
diff --git a/nn/common/operations/RoiAlign.cpp b/nn/common/operations/RoiAlign.cpp
index ddf7b8e..231f357 100644
--- a/nn/common/operations/RoiAlign.cpp
+++ b/nn/common/operations/RoiAlign.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -48,6 +49,8 @@
namespace {
+using namespace hal;
+
template <typename T_Input, typename T_Roi>
inline bool roiAlignNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/nn/common/operations/RoiPooling.cpp b/nn/common/operations/RoiPooling.cpp
index bfcf78b..3c87b91 100644
--- a/nn/common/operations/RoiPooling.cpp
+++ b/nn/common/operations/RoiPooling.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -46,6 +47,8 @@
namespace {
+using namespace hal;
+
template <typename T_Input, typename T_Roi>
inline bool roiPoolingNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/nn/common/operations/SVDF.cpp b/nn/common/operations/SVDF.cpp
index 669e92e..844361d 100644
--- a/nn/common/operations/SVDF.cpp
+++ b/nn/common/operations/SVDF.cpp
@@ -27,6 +27,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
SVDF::SVDF(const Operation& operation,
std::vector<RunTimeOperandInfo>& operands) {
NNTRACE_TRANS("SVDF::SVDF");
diff --git a/nn/common/operations/SVDF.h b/nn/common/operations/SVDF.h
index e7f8d47..257fd11 100644
--- a/nn/common/operations/SVDF.h
+++ b/nn/common/operations/SVDF.h
@@ -36,10 +36,12 @@
class SVDF {
public:
- SVDF(const Operation& operation, std::vector<RunTimeOperandInfo>& operands);
+ SVDF(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
- Shape* stateShape, Shape* outputShape);
+ static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
+ std::vector<RunTimeOperandInfo>& operands, Shape* stateShape,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/nn/common/operations/Select.cpp b/nn/common/operations/Select.cpp
index 2a26c1e..e9b6645 100644
--- a/nn/common/operations/Select.cpp
+++ b/nn/common/operations/Select.cpp
@@ -35,6 +35,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData,
const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
diff --git a/nn/common/operations/Slice.cpp b/nn/common/operations/Slice.cpp
index 4ecfcb5..fc28f7d 100644
--- a/nn/common/operations/Slice.cpp
+++ b/nn/common/operations/Slice.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -36,6 +37,8 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
+using namespace hal;
+
namespace {
template <typename T>
diff --git a/nn/common/operations/Softmax.cpp b/nn/common/operations/Softmax.cpp
index 209f60e..a93fc86 100644
--- a/nn/common/operations/Softmax.cpp
+++ b/nn/common/operations/Softmax.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -41,6 +42,8 @@
namespace {
+using namespace hal;
+
inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
int32_t axis, float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("softmaxFloatSlow32");
diff --git a/nn/common/operations/StridedSlice.cpp b/nn/common/operations/StridedSlice.cpp
index c8fba2d..0310567 100644
--- a/nn/common/operations/StridedSlice.cpp
+++ b/nn/common/operations/StridedSlice.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "Operations.h"
#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
@@ -28,6 +29,8 @@
namespace android {
namespace nn {
+using namespace hal;
+
bool stridedSliceGeneric(const uint8_t* inputData, const Shape& inputShape,
const int32_t* beginData, const int32_t* endData,
const int32_t* stridesData, int32_t beginMask, int32_t endMask,
diff --git a/nn/common/operations/Tile.cpp b/nn/common/operations/Tile.cpp
index e5f4a61..380f5ac 100644
--- a/nn/common/operations/Tile.cpp
+++ b/nn/common/operations/Tile.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "Tile.h"
+#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
@@ -25,6 +26,8 @@
namespace {
+using namespace hal;
+
template <typename T>
void CopyMultipleTimes(const T* in_data, int32_t in_size, int32_t multiplier, T* out_data) {
for (int i = 0; i < multiplier; ++i) {
diff --git a/nn/common/operations/TopK_V2.cpp b/nn/common/operations/TopK_V2.cpp
index 5d377c0..010d380 100644
--- a/nn/common/operations/TopK_V2.cpp
+++ b/nn/common/operations/TopK_V2.cpp
@@ -18,6 +18,7 @@
#include "TopK_V2.h"
+#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include <algorithm>
@@ -28,6 +29,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t k, T* valuesData,
const Shape& /*valuesShape*/, int32_t* indicesData,
diff --git a/nn/common/operations/Transpose.cpp b/nn/common/operations/Transpose.cpp
index e72eb10..8ae0ccd 100644
--- a/nn/common/operations/Transpose.cpp
+++ b/nn/common/operations/Transpose.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -39,6 +40,8 @@
namespace {
+using namespace hal;
+
template <typename T>
bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
const Shape& permShape, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/TransposeConv2D.cpp b/nn/common/operations/TransposeConv2D.cpp
index e6ec1a6..42d6cf4 100644
--- a/nn/common/operations/TransposeConv2D.cpp
+++ b/nn/common/operations/TransposeConv2D.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -39,6 +40,8 @@
namespace {
+using namespace hal;
+
// If possible, we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
diff --git a/nn/common/operations/UnidirectionalSequenceLSTM.cpp b/nn/common/operations/UnidirectionalSequenceLSTM.cpp
index f321169..abed464 100644
--- a/nn/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/nn/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -83,6 +83,8 @@
namespace {
+using namespace hal;
+
inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
return context->getInputBuffer(tensor) != nullptr;
}
diff --git a/nn/common/operations/UnidirectionalSequenceRNN.cpp b/nn/common/operations/UnidirectionalSequenceRNN.cpp
index cf5c620..5747907 100644
--- a/nn/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/nn/common/operations/UnidirectionalSequenceRNN.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "Operations"
+#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
@@ -37,6 +38,8 @@
namespace {
+using namespace hal;
+
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);