arm_compute v20.05
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 6592b2b..218e6ce 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -306,7 +306,7 @@
}
// Create deconvolution node and connect
- NodeID deconv_nid = g.add_node<DeconvolutionLayerNode>(deconv_info);
+ NodeID deconv_nid = g.add_node<DeconvolutionLayerNode>(descriptors::DeconvolutionLayerDescriptor{ deconv_info });
g.add_connection(input.node_id, input.index, deconv_nid, 0);
g.add_connection(w_nid, 0, deconv_nid, 1);
if(has_bias)
@@ -438,7 +438,7 @@
check_nodeidx_pair(input0, g);
check_nodeidx_pair(input1, g);
- NodeID nid = g.add_node<EltwiseLayerNode>(operation);
+ NodeID nid = g.add_node<EltwiseLayerNode>(descriptors::EltwiseLayerDescriptor{ operation });
g.add_connection(input0.node_id, input0.index, nid, 0);
g.add_connection(input1.node_id, input1.index, nid, 1);
@@ -571,9 +571,9 @@
return norm_planar_yuv_nid;
}
-NodeID GraphBuilder::add_pad_node(Graph &g, NodeParams params, NodeIdxPair input, PaddingList padding)
+NodeID GraphBuilder::add_pad_node(Graph &g, NodeParams params, NodeIdxPair input, const PaddingList &paddings, PixelValue pad_value)
{
- return create_simple_single_input_output_node<PadLayerNode>(g, params, input, padding);
+ return create_simple_single_input_output_node<PadLayerNode>(g, params, input, paddings, pad_value);
}
NodeID GraphBuilder::add_permute_node(Graph &g, NodeParams params, NodeIdxPair input, PermutationVector perm, DataLayout layout)
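Note on the updated GraphBuilder entry points: below is a minimal usage sketch of the descriptor-based add_node calls and the new pad-value argument, assuming the descriptors are brace-constructible exactly as used in the hunks above. Here `g`, `params` and `input` stand for a caller's existing Graph, NodeParams and NodeIdxPair; they are not defined in this sketch.

    // Illustrative sketch only (not part of the patch).
    PadStrideInfo deconv_info(2, 2, 1, 1); // stride 2x2, pad 1x1
    NodeID deconv_nid = g.add_node<DeconvolutionLayerNode>(descriptors::DeconvolutionLayerDescriptor{ deconv_info });

    // Element-wise addition via the new descriptor-wrapped constructor.
    NodeID add_nid = g.add_node<EltwiseLayerNode>(descriptors::EltwiseLayerDescriptor{ EltwiseOperation::Add });

    // Padding with an explicit fill value; PixelValue() is the all-zero default.
    PaddingList paddings = { { 1, 1 }, { 1, 1 } };
    NodeID pad_nid = GraphBuilder::add_pad_node(g, params, input, paddings, PixelValue());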
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index de31847..0159592 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -169,9 +169,8 @@
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- auto backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
- return std::move(backend_tensor_handle);
+ return support::cpp14::make_unique<CLTensorHandle>(info);
}
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
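Note: the three backend create_tensor changes (CL here, GLES and NEON below) share one pattern: the unique_ptr produced by make_unique is returned directly instead of being stored in a local and wrapped in std::move. A standalone sketch of why the explicit move is unnecessary; Widget and the helper names are hypothetical, not library code.

    #include <memory>

    struct Widget { int value = 0; };

    // Returning the make_unique temporary directly: a prvalue, eligible for copy elision.
    std::unique_ptr<Widget> make_widget()
    {
        return std::make_unique<Widget>();
    }

    // A named local is already treated as an rvalue in the return statement,
    // so wrapping it in std::move() adds nothing (clang warns with -Wredundant-move).
    std::unique_ptr<Widget> make_widget_named()
    {
        auto w = std::make_unique<Widget>();
        return w;
    }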
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index e6684f6..312e09a 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -158,7 +158,7 @@
wrap_function->register_tensor(input2);
wrap_function->register_tensor(output);
- return std::move(wrap_function);
+ return RETURN_UNIQUE_PTR(wrap_function);
}
template <>
std::unique_ptr<IFunction> create_detection_post_process_layer<CPPDetectionPostProcessLayer, CLTargetInfo>(DetectionPostProcessLayerNode &node)
@@ -214,7 +214,7 @@
wrap_function->register_tensor(output2);
wrap_function->register_tensor(output3);
- return std::move(wrap_function);
+ return RETURN_UNIQUE_PTR(wrap_function);
}
} // namespace detail
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 83e2436..bb674ce 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,9 +111,8 @@
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- auto backend_tensor_handle = support::cpp14::make_unique<GCTensorHandle>(info);
- return std::move(backend_tensor_handle);
+ return support::cpp14::make_unique<GCTensorHandle>(info);
}
std::unique_ptr<ITensorHandle> GCDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index 017b4f0..b568b79 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,9 +122,8 @@
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- auto backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
- return std::move(backend_tensor_handle);
+ return support::cpp14::make_unique<NETensorHandle>(info);
}
std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index ddb4ccf..454215e 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -80,78 +80,6 @@
namespace detail
{
-// Specialized functions
-template <>
-std::unique_ptr<IFunction> create_convolution_layer<NEConvolutionLayerFunctions, NETargetInfo>(ConvolutionLayerNode &node,
- GraphContext &ctx)
-{
- validate_node<NETargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
-
- // Extract IO and info
- NETargetInfo::TensorType *input = get_backing_tensor<NETargetInfo>(node.input(0));
- NETargetInfo::TensorType *weights = get_backing_tensor<NETargetInfo>(node.input(1));
- NETargetInfo::TensorType *biases = get_backing_tensor<NETargetInfo>(node.input(2));
- NETargetInfo::TensorType *output = get_backing_tensor<NETargetInfo>(node.output(0));
-
- const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
- if(is_quantized)
- {
- biases->info()->set_data_type(DataType::S32);
- }
-
- const PadStrideInfo conv_info = node.convolution_info();
- const ConvolutionMethod conv_algorithm = node.convolution_method();
- const ActivationLayerInfo fused_act = node.fused_activation();
-
- // Create and configure function (we assume that functions have been validated before creation)
- std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::NEON);
- std::unique_ptr<IFunction> func;
- std::string func_name;
-
- if(conv_algorithm == ConvolutionMethod::Direct)
- {
- std::tie(func, func_name) = create_named_memory_managed_function<NEDirectConvolutionLayer>(
- std::string("DirectConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act);
- }
- else if(conv_algorithm == ConvolutionMethod::GEMM)
- {
- std::tie(func, func_name) = create_named_memory_managed_function<NEGEMMConvolutionLayer>(
- std::string("GEMMConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1), fused_act);
- }
- else if(conv_algorithm == ConvolutionMethod::Winograd)
- {
- std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradConvolutionLayer>(
- std::string("WinogradConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act);
- }
- else
- {
- std::tie(func, func_name) = create_named_memory_managed_function<NEConvolutionLayer>(
- std::string("ConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1), fused_act);
- }
-
- // Log info
- std::ostringstream qss;
- if(is_quantized)
- {
- qss << " Input QuantInfo: " << input->info()->quantization_info()
- << " Weights QuantInfo: " << weights->info()->quantization_info()
- << " Output QuantInfo: " << output->info()->quantization_info();
- }
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << func_name
- << " Target: " << NETargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << qss.str()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << std::endl);
- return func;
-}
-
template <>
std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETargetInfo>(NormalizationLayerNode &node, GraphContext &ctx)
{
@@ -179,7 +107,7 @@
<< " Normalization info: " << norm_info.type()
<< std::endl);
- return std::move(func);
+ return RETURN_UNIQUE_PTR(func);
}
} // namespace detail
diff --git a/src/graph/mutators/GroupedConvolutionMutator.cpp b/src/graph/mutators/GroupedConvolutionMutator.cpp
index bb452f9..f8494a8 100644
--- a/src/graph/mutators/GroupedConvolutionMutator.cpp
+++ b/src/graph/mutators/GroupedConvolutionMutator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,6 +32,8 @@
#include "arm_compute/core/utils/misc/Cast.h"
+#include "support/StringSupport.h"
+
#include <set>
namespace arm_compute
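Note: the new include is presumably needed for the string helpers that live in support/StringSupport.h. A hypothetical sketch of the kind of name composition a graph mutator performs with them; the function and its arguments are illustrative, not taken from the mutator itself.

    #include "support/StringSupport.h"

    #include <string>

    // Hypothetical helper: build a per-group node name such as "conv1_g2".
    std::string make_group_name(const std::string &base, unsigned int group_idx)
    {
        return base + "_g" + arm_compute::support::cpp11::to_string(group_idx);
    }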
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index b7f081d..ae53b8f 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -294,13 +294,20 @@
void NodeFusionMutator::mutate(Graph &g)
{
// Supported activations when fusing
- const std::set<Activation> supported_fused_activations = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
+ const std::set<Activation> supported_fused_activations_conv = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
+ const std::set<Activation> supported_fused_activations_eltwise = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU,
+ Activation::TANH, Activation::LOGISTIC
+ };
// Preconditions
auto empty_prec = [](INode &)
{
return true;
};
+ auto cl_target_prec = [](INode & n)
+ {
+ return n.assigned_target() == Target::CL;
+ };
auto qs8_prec = [&g](INode & n)
{
ARM_COMPUTE_ERROR_ON(n.output(0) == nullptr);
@@ -315,9 +322,11 @@
};
// Fusion mutations
- detail::fuse_layer<BatchNormalizationLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<BatchNormalizationLayerNode>, supported_fused_activations);
- detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations);
- detail::fuse_layer<DepthwiseConvolutionLayerNode, ActivationLayerNode>(g, qs8_prec, detail::fuse_node_with_activation<DepthwiseConvolutionLayerNode>, supported_fused_activations);
+ detail::fuse_layer<BatchNormalizationLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<BatchNormalizationLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<DepthwiseConvolutionLayerNode, ActivationLayerNode>(g, qs8_prec, detail::fuse_node_with_activation<DepthwiseConvolutionLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<FullyConnectedLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<FullyConnectedLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<EltwiseLayerNode, ActivationLayerNode>(g, cl_target_prec, detail::fuse_node_with_activation<EltwiseLayerNode>, supported_fused_activations_eltwise);
detail::fuse_layer<ConvolutionLayerNode, BatchNormalizationLayerNode>(g, empty_prec, detail::fuse_convolution_with_batch_normalization);
detail::fuse_layer<DepthwiseConvolutionLayerNode, BatchNormalizationLayerNode>(g, empty_prec, detail::fuse_depthwise_convolution_with_batch_normalization);
}
diff --git a/src/graph/mutators/SyntheticDataTypeMutator.cpp b/src/graph/mutators/SyntheticDataTypeMutator.cpp
index b318df9..0a9f505 100644
--- a/src/graph/mutators/SyntheticDataTypeMutator.cpp
+++ b/src/graph/mutators/SyntheticDataTypeMutator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -175,6 +175,10 @@
{
tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 128);
}
+ else if(act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
+ {
+ tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
+ }
return true;
};
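Note: the fixed output quantization assigned here can be sanity-checked with the standard QASYMM8 affine mapping, value = scale * (q - zero_point). Scale 1/128 with zero point 128 covers roughly [-1, 1), matching tanh, while the scale 1/256 with zero point 0 added for LOGISTIC covers roughly [0, 1), matching the logistic range. A small standalone check; the formula is the generic dequantization rule, not code from this patch.

    #include <cstdint>
    #include <iostream>

    // Standard affine dequantization for QASYMM8: value = scale * (q - zero_point).
    float dequantize(uint8_t q, float scale, int zero_point)
    {
        return scale * (static_cast<int>(q) - zero_point);
    }

    int main()
    {
        // TANH output quantization: scale 1/128, zero point 128.
        std::cout << dequantize(0, 1.f / 128.f, 128) << " .. "   // -1.0
                  << dequantize(255, 1.f / 128.f, 128) << "\n";  //  0.9921875
        // LOGISTIC output quantization added above: scale 1/256, zero point 0.
        std::cout << dequantize(0, 1.f / 256.f, 0) << " .. "     //  0.0
                  << dequantize(255, 1.f / 256.f, 0) << "\n";    //  0.99609375
        return 0;
    }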
diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp
index d4a5b76..2daeaac 100644
--- a/src/graph/nodes/DeconvolutionLayerNode.cpp
+++ b/src/graph/nodes/DeconvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,8 +32,8 @@
{
namespace graph
{
-DeconvolutionLayerNode::DeconvolutionLayerNode(PadStrideInfo info)
- : _info(std::move(info))
+DeconvolutionLayerNode::DeconvolutionLayerNode(const descriptors::DeconvolutionLayerDescriptor &descriptor)
+ : descriptor(std::move(descriptor))
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -41,7 +41,7 @@
PadStrideInfo DeconvolutionLayerNode::deconvolution_info() const
{
- return _info;
+ return descriptor.info;
}
TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
@@ -87,7 +87,13 @@
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
+ TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), descriptor.info);
+
+ if(!descriptor.out_quant_info.empty())
+ {
+ output_info.set_quantization_info(descriptor.out_quant_info);
+ }
+
return output_info;
}
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index 568b882..92d183e 100644
--- a/src/graph/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,8 @@
{
namespace graph
{
-EltwiseLayerNode::EltwiseLayerNode(EltwiseOperation op, ConvertPolicy c_policy, RoundingPolicy r_policy)
- : _op(op), _convert_policy(c_policy), _rounding_policy(r_policy)
+EltwiseLayerNode::EltwiseLayerNode(const descriptors::EltwiseLayerDescriptor &descriptor)
+ : descriptor(descriptor)
{
_input_edges.resize(2, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -39,17 +39,27 @@
EltwiseOperation EltwiseLayerNode::eltwise_operation() const
{
- return _op;
+ return descriptor.op;
}
ConvertPolicy EltwiseLayerNode::convert_policy() const
{
- return _convert_policy;
+ return descriptor.c_policy;
}
RoundingPolicy EltwiseLayerNode::rounding_policy() const
{
- return _rounding_policy;
+ return descriptor.r_policy;
+}
+
+ActivationLayerInfo EltwiseLayerNode::fused_activation() const
+{
+ return descriptor.fused_activation;
+}
+
+void EltwiseLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
+{
+ descriptor.fused_activation = fused_activation;
}
bool EltwiseLayerNode::forward_descriptors()
@@ -66,12 +76,19 @@
TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
{
- ARM_COMPUTE_UNUSED(idx, _op, _convert_policy, _rounding_policy);
+ ARM_COMPUTE_UNUSED(idx);
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- return src->desc();
+ auto output_info = src->desc();
+
+ if(!descriptor.out_quant_info.empty())
+ {
+ output_info.set_quantization_info(descriptor.out_quant_info);
+ }
+
+ return output_info;
}
NodeType EltwiseLayerNode::type() const
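Note: a short sketch of the round-trip the new EltwiseLayerNode accessors enable, assuming only the descriptor fields visible in the hunks above; in practice the node is created through GraphBuilder rather than constructed directly.

    // Illustrative sketch only; the descriptor's full member list is not shown in this excerpt.
    descriptors::EltwiseLayerDescriptor desc{ EltwiseOperation::Add };
    EltwiseLayerNode node(desc);

    // The fusion mutator can now attach an activation to the node...
    node.set_fused_activation(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // ...and the backend function factory can query it when building the kernel.
    ActivationLayerInfo act = node.fused_activation();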
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 80fce7b..34c432a 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,6 +38,11 @@
_outputs.resize(1, NullTensorID);
}
+void FullyConnectedLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
+{
+ _info.activation_info = fused_activation;
+}
+
TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
FullyConnectedLayerInfo fc_info,
diff --git a/src/graph/nodes/PadLayerNode.cpp b/src/graph/nodes/PadLayerNode.cpp
index 1bd0776..cbee134 100644
--- a/src/graph/nodes/PadLayerNode.cpp
+++ b/src/graph/nodes/PadLayerNode.cpp
@@ -32,8 +32,8 @@
{
namespace graph
{
-PadLayerNode::PadLayerNode(const PaddingList &padding)
- : _padding(padding)
+PadLayerNode::PadLayerNode(const PaddingList &padding, PixelValue pad_value)
+ : _padding(padding), _pad_value(pad_value)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -44,6 +44,11 @@
return _padding;
}
+PixelValue PadLayerNode::pad_value() const
+{
+ return _pad_value;
+}
+
bool PadLayerNode::forward_descriptors()
{
if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))