Move setRequestArgumentArray to ModelArgumentInfo
This CL also renames setRequestArgumentArray to createRequestArguments
and makes the function return the hidl_vec by value.
Bug: N/A
Test: mma
Change-Id: I4f4b222bfa25d7a8bee2bad80425e0192d0aec09
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index d23508f..164c9b6 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -219,26 +219,6 @@
return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(preparedModel)};
}
-// Convert ModelArgumentInfo to HIDL RequestArgument. For pointer arguments, use the location
-// information in ptrArgsLocations.
-static void setRequestArgumentArray(const std::vector<ModelArgumentInfo>& argumentInfos,
- const std::vector<DataLocation>& ptrArgsLocations,
- hidl_vec<RequestArgument>* ioInfos) {
- size_t count = argumentInfos.size();
- ioInfos->resize(count);
- uint32_t ptrArgsIndex = 0;
- for (size_t i = 0; i < count; i++) {
- const auto& info = argumentInfos[i];
- (*ioInfos)[i] = {
- .hasNoValue = info.state == ModelArgumentInfo::HAS_NO_VALUE,
- .location = info.state == ModelArgumentInfo::POINTER
- ? ptrArgsLocations[ptrArgsIndex++]
- : info.locationAndLength,
- .dimensions = info.dimensions,
- };
- }
-}
-
// Figures out how to place each of the input or outputs in a buffer. This just
// does the layout and memory allocation, it does not copy data. Aligns each
// input a bit.
@@ -324,8 +304,8 @@
}
Request request;
- setRequestArgumentArray(inputs, inputPtrArgsLocations, &request.inputs);
- setRequestArgumentArray(outputs, outputPtrArgsLocations, &request.outputs);
+ request.inputs = createRequestArguments(inputs, inputPtrArgsLocations);
+ request.outputs = createRequestArguments(outputs, outputPtrArgsLocations);
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
@@ -551,8 +531,8 @@
const std::vector<DataLocation> outputPtrArgsLocations = fixPointerArguments(outputs);
Request request;
- setRequestArgumentArray(inputs, inputPtrArgsLocations, &request.inputs);
- setRequestArgumentArray(outputs, outputPtrArgsLocations, &request.outputs);
+ request.inputs = createRequestArguments(inputs, inputPtrArgsLocations);
+ request.outputs = createRequestArguments(outputs, outputPtrArgsLocations);
if (!DeviceManager::get()->syncExecCpu()) {
// TODO: use a thread pool
diff --git a/nn/runtime/ModelArgumentInfo.cpp b/nn/runtime/ModelArgumentInfo.cpp
index 52935d1..fa6b709 100644
--- a/nn/runtime/ModelArgumentInfo.cpp
+++ b/nn/runtime/ModelArgumentInfo.cpp
@@ -111,5 +111,24 @@
return ANEURALNETWORKS_NO_ERROR;
}
+hidl_vec<RequestArgument> createRequestArguments(
+ const std::vector<ModelArgumentInfo>& argumentInfos,
+ const std::vector<DataLocation>& ptrArgsLocations) {
+ const size_t count = argumentInfos.size();
+ hidl_vec<RequestArgument> ioInfos(count);
+ uint32_t ptrArgsIndex = 0;
+ for (size_t i = 0; i < count; i++) {
+ const auto& info = argumentInfos[i];
+ ioInfos[i] = {
+ .hasNoValue = info.state == ModelArgumentInfo::HAS_NO_VALUE,
+ .location = info.state == ModelArgumentInfo::POINTER
+ ? ptrArgsLocations[ptrArgsIndex++]
+ : info.locationAndLength,
+ .dimensions = info.dimensions,
+ };
+ }
+ return ioInfos;
+}
+
} // namespace nn
} // namespace android
diff --git a/nn/runtime/ModelArgumentInfo.h b/nn/runtime/ModelArgumentInfo.h
index 6840324..dd42c32 100644
--- a/nn/runtime/ModelArgumentInfo.h
+++ b/nn/runtime/ModelArgumentInfo.h
@@ -51,7 +51,13 @@
int updateDimensionInfo(const hal::Operand& operand, const ANeuralNetworksOperandType* newType);
};
+// Convert ModelArgumentInfo to HIDL RequestArgument. For pointer arguments, use the location
+// information in ptrArgsLocations.
+hal::hidl_vec<hal::RequestArgument> createRequestArguments(
+ const std::vector<ModelArgumentInfo>& argumentInfos,
+ const std::vector<hal::DataLocation>& ptrArgsLocations);
+
} // namespace nn
} // namespace android
-#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_ARGUMENT_INFO_H
\ No newline at end of file
+#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_ARGUMENT_INFO_H