/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <cmath>
#include <tuple>

using namespace arm_compute;

NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _activationlayer_function(), _accumulator(), _has_bias(false), _is_fixed_point(false),
      _is_activationlayer_enabled(false), _dim_split(Window::DimZ)
{
}
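
// Set up the direct convolution: the convolution kernel itself, an optional output stage
// (bias addition and, for fixed point, downscaling of the intermediate accumulator), the
// constant border fill on the input and an optional fused activation layer.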
void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::UNKNOWN);

    // Free accumulator
    if(_accumulator.buffer() != nullptr)
    {
        _accumulator.allocator()->free();
    }

    _dim_split = input->info()->data_layout() == DataLayout::NCHW ? Window::DimZ : Window::DimY;

    // Check if bias should be added in the convolution result
    _has_bias = (bias != nullptr);

    // Allocate the intermediate accumulator tensor in case of fixed point input
    _is_fixed_point = is_data_type_fixed_point(input->info()->data_type());
    if(_is_fixed_point)
    {
        const DataType promoted_dt = (input->info()->data_type() == DataType::QS8) ? DataType::QS16 : DataType::QS32;
        _accumulator.allocator()->init(TensorInfo(output->info()->tensor_shape(), 1, promoted_dt, output->info()->fixed_point_position()));
        _memory_group.manage(&_accumulator);
        _conv_kernel.configure(input, weights, &_accumulator, conv_info);

        // The output stage downscales the accumulator to the output data type and adds the bias when one is provided
        _output_stage_kernel.configure(&_accumulator, bias, output);
        _accumulator.allocator()->allocate();
    }
    else
    {
        _conv_kernel.configure(input, weights, output, conv_info);
        if(_has_bias)
        {
            _output_stage_kernel.configure(output, bias);
        }
    }

    // Add zero padding XY
    _input_border_handler.configure(input, _conv_kernel.border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }
}
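
// Static validation mirroring configure(): checks the convolution kernel, the output stage
// and the optional activation without configuring any resources.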
Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                          const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);

    DataType data_type = output->data_type();
    if(is_data_type_fixed_point(data_type))
    {
        // Promote data type in case of fixed point
        data_type = ((data_type == DataType::QS8) ? DataType::QS16 : DataType::QS32);
    }
    TensorInfo accumulator(output->clone()->set_is_resizable(true).reset_padding().set_data_type(data_type));

    // Validate Convolution kernel
    ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerKernel::validate(input, weights, &accumulator, conv_info));

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
                                        "Biases size and number of input feature maps should match");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
    }

    // Validate bias kernel
    ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, bias, output));

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}
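
// Execute the function: fill the input border, run the convolution kernel, then the output
// stage (when a bias is present or the fixed-point accumulator needs downscaling) and the
// fused activation, with the intermediate accumulator managed by the memory group.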
void NEDirectConvolutionLayer::run()
{
    NEScheduler::get().schedule(&_input_border_handler, Window::DimZ);

    _memory_group.acquire();

    NEScheduler::get().schedule(&_conv_kernel, _dim_split);
    if(_has_bias || _is_fixed_point)
    {
        NEScheduler::get().schedule(&_output_stage_kernel, Window::DimY);
    }

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }
    _memory_group.release();
}
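
// Illustrative usage sketch (not part of the library source; tensor names, shapes and the
// allocation steps are assumptions):
//
//   Tensor src{}, weights{}, bias{}, dst{};
//   // ... initialise each tensor's TensorInfo and allocate its backing memory ...
//   NEDirectConvolutionLayer conv;
//   conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0), ActivationLayerInfo());
//   conv.run();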