Allow implicit padding
- Update the documentation to support implicit padding.
- Removed error logging for implicit padding input.
Bug: 63905942
Test: mm
Test: NeuralNetworksTest pass
Change-Id: I5149433ccb393ed390ad9e24013249ef37c6aba8
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 171ab67..9b00628 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -341,7 +341,8 @@
}
} break;
case OperationType::DEPTHWISE_CONV_2D: {
- if (!parameterCountIs(11, 1) && !parameterCountIs(8, 1)) {
+ if ((ins.size() != 11 && ins.size() != 8) ||
+ !parameterCountIs(ins.size(), 1)) {
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo& input = mOperands[ins[0]];
@@ -354,7 +355,7 @@
int32_t depth_multiplier;
int32_t activation;
- if (parameterCountIs(11, 1)) {
+ if (ins.size() == 11) {
padding_left = getScalarData<int32_t>(mOperands[ins[3]]);
padding_right = getScalarData<int32_t>(mOperands[ins[4]]);
padding_top = getScalarData<int32_t>(mOperands[ins[5]]);
@@ -429,7 +430,8 @@
} break;
case OperationType::CONV_2D: {
- if (!parameterCountIs(10, 1) && !parameterCountIs(7, 1)) {
+ if ((ins.size() != 10 && ins.size() != 7) ||
+ !parameterCountIs(ins.size(), 1)) {
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo& input = mOperands[ins[0]];
@@ -441,7 +443,7 @@
int32_t stride_width, stride_height;
int32_t activation;
- if (parameterCountIs(10, 1)) {
+ if (ins.size() == 10) {
padding_left = getScalarData<int32_t>(mOperands[ins[3]]);
padding_right = getScalarData<int32_t>(mOperands[ins[4]]);
padding_top = getScalarData<int32_t>(mOperands[ins[5]]);
@@ -507,7 +509,8 @@
}
} break;
case OperationType::AVERAGE_POOL_2D: {
- if (!parameterCountIs(10, 1) && !parameterCountIs(7, 1)) {
+ if ((ins.size() != 10 && ins.size() != 7) ||
+ !parameterCountIs(ins.size(), 1)) {
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo& input = mOperands[ins[0]];
@@ -517,7 +520,7 @@
int32_t filter_width, filter_height;
int32_t activation;
- if (parameterCountIs(10, 1)) {
+ if (ins.size() == 10) {
padding_left = getScalarData<int32_t>(mOperands[ins[1]]);
padding_right = getScalarData<int32_t>(mOperands[ins[2]]);
padding_top = getScalarData<int32_t>(mOperands[ins[3]]);
@@ -584,7 +587,8 @@
}
} break;
case OperationType::L2_POOL_2D: {
- if (!parameterCountIs(10, 1) && !parameterCountIs(7, 1)) {
+ if ((ins.size() != 10 && ins.size() != 7) ||
+ !parameterCountIs(ins.size(), 1)) {
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo& input = mOperands[ins[0]];
@@ -594,7 +598,7 @@
int32_t filter_width, filter_height;
int32_t activation;
- if (parameterCountIs(10, 1)) {
+ if (ins.size() == 10) {
padding_left = getScalarData<int32_t>(mOperands[ins[1]]);
padding_right = getScalarData<int32_t>(mOperands[ins[2]]);
padding_top = getScalarData<int32_t>(mOperands[ins[3]]);
@@ -645,7 +649,8 @@
}
} break;
case OperationType::MAX_POOL_2D: {
- if (!parameterCountIs(10, 1) && !parameterCountIs(7, 1)) {
+ if ((ins.size() != 10 && ins.size() != 7) ||
+ !parameterCountIs(ins.size(), 1)) {
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo& input = mOperands[ins[0]];
@@ -655,7 +660,7 @@
int32_t filter_width, filter_height;
int32_t activation;
- if (parameterCountIs(10, 1)) {
+ if (ins.size() == 10) {
padding_left = getScalarData<int32_t>(mOperands[ins[1]]);
padding_right = getScalarData<int32_t>(mOperands[ins[2]]);
padding_top = getScalarData<int32_t>(mOperands[ins[3]]);
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index c0fc5cf..e3c7c1c 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -145,7 +145,9 @@
*
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
* * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
* * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
@@ -158,6 +160,17 @@
* * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
* Specifies the activation to invoke on the result of each addition.
*
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FuseCode} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
@@ -210,7 +223,9 @@
*
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
* * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
* specifying the filter.
@@ -229,6 +244,23 @@
* * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
* Specifies the activation to invoke on the result of each addition.
*
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 4: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * * 6: An INT32 value, and has to be one of the {@link FuseCode} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
* For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following
@@ -261,7 +293,9 @@
*
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
@@ -281,6 +315,24 @@
* * 10: An INT32 value, and has to be one of the {@link FuseCode} values.
* Specifies the activation to invoke on the result of each addition.
*
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 4: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the depthwise multiplier.
+ * * 7: An INT32 value, and has to be one of the {@link FuseCode} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
* For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following
@@ -464,7 +516,9 @@
*
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
* * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
* * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
@@ -477,6 +531,17 @@
* * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
* Specifies the activation to invoke on the result of each addition.
*
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FuseCode} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
@@ -682,7 +747,9 @@
*
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
* * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
* * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
@@ -695,6 +762,17 @@
* * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
* Specifies the activation to invoke on the result of each addition.
*
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FuseCode} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
@@ -1047,6 +1125,17 @@
} FuseCode;
/**
+ * Implicit padding algorithms.
+ *
+ */
+typedef enum {
+ /** SAME padding. */
+ ANEURALNETWORKS_PADDING_SAME = 1,
+ /** VALID padding. */
+ ANEURALNETWORKS_PADDING_VALID = 2,
+} PaddingCode;
+
+/**
* Execution preferences.
*/
typedef enum {