/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _function()
{
}

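// A minimal usage sketch, illustrative only: it assumes src, weights, biases and
// dst are CLTensor objects that have already been initialised and allocated, and
// relies on the default values of the remaining configure() parameters:
//
//     CLScheduler::get().default_init();
//     CLConvolutionLayer conv;
//     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
//     conv.run();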
void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
}

void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                   const WeightsInfo &weights_info,
                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                            enable_fast_math, num_groups));

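    // Pick the most appropriate implementation for this workload and instantiate it;
    // every case forwards to the configure() of a specialised function which owns its
    // own kernels and, for all but direct convolution, shares this memory manager.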
    switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info,
                                                      weights_info, act_info, CLScheduler::get().target(), dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = arm_compute::support::cpp14::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            auto f = arm_compute::support::cpp14::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::FFT:
        {
            auto f = arm_compute::support::cpp14::make_unique<CLFFTConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info);
            _function = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }
}

Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");

    const GPUTarget gpu_target = CLScheduler::get().target();

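    // Mirror the dispatch performed by configure(): select the method that would be
    // used for these tensors and delegate validation to the corresponding function,
    // without allocating any resources.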
    switch(CLConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, act_info, gpu_target, dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            // Validate Winograd
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLWinogradConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            // Validate direct convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLDirectConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            // Validate gemm-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups));
            break;
        }
        case ConvolutionMethod::FFT:
        {
            // Validate FFT-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info));
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    return Status{};
}

ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                             const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation, bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_UNUSED(weights_info);
    ARM_COMPUTE_UNUSED(gpu_target);

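    // Resolve the width/height/channel dimension indices for the input's data layout,
    // so the heuristics below work for both NCHW and NHWC tensors.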
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

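    // Fast path: exact layer configurations from well-known networks, mapped directly
    // to a preferred method before consulting the general heuristics further below.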
    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224 (NHWC)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160 (NHWC)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config      = c.first;
        const PadStrideInfo            info        = std::get<3>(config);
        const DataLayout               data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride() && (data_layout == input->data_layout());
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

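    // No table match: fall back to shape-based heuristics. Dilation is only supported
    // by the GEMM path; the DIRECT, FFT and WINOGRAD candidates are chosen only if
    // the corresponding function's validate() accepts the configuration.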
    if(dilation != Size2D(1U, 1U))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        // SRGAN-style layers: very large feature maps convolved with a 9x9 kernel and little padding
        if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
           && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::DIRECT;
        }
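        // Large kernels with a shrinking channel count are routed to the FFT-based
        // implementation when it supports the configuration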
        if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::FFT;
        }
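        // Winograd is generally not worthwhile for shallow inputs, so prefer GEMM
        // below 16 input channels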
        if(input->dimension(idx_c) < 16)
        {
            return ConvolutionMethod::GEMM;
        }
        return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
    }
}

void CLConvolutionLayer::run()
{
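    // Finish any one-time preparation work in the selected function (e.g. weight
    // reshaping/transformation) before executing it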
    prepare();
    _function->run();
}

void CLConvolutionLayer::prepare()
{
    _function->prepare();
}
} // namespace arm_compute