blob: d794cde1f41bb0332dc171a3d9047b6700eb5575 [file] [log] [blame]
Anthony Barbier871448e2017-03-24 14:54:29 +00001/*
Jenkins4ba87db2019-05-23 17:11:51 +01002 * Copyright (c) 2017-2019 ARM Limited.
Anthony Barbier871448e2017-03-24 14:54:29 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
25
26#include "arm_compute/core/PixelValue.h"
27#include "arm_compute/core/Utils.h"
28#include "arm_compute/core/Validate.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000029#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Anthony Barbier8140e1e2017-12-14 23:48:46 +000030#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Anthony Barbier871448e2017-03-24 14:54:29 +000031#include "arm_compute/runtime/CL/CLScheduler.h"
32
33#include <cmath>
Kaizen8938bd32017-09-28 14:38:23 +010034#include <memory>
Anthony Barbier871448e2017-03-24 14:54:29 +000035#include <tuple>
36
37using namespace arm_compute;
Anthony Barbier06ea0482018-02-22 15:45:35 +000038using namespace arm_compute::misc::shape_calculator;
Anthony Barbierdbdab852017-06-23 15:42:00 +010039
/** Constructor.
 *
 * @param[in] memory_manager (Optional) Memory manager shared with the concrete
 *            convolution function instantiated later in configure().
 */
CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _function()
{
}
44
Jenkinsb3a371b2018-05-23 11:36:53 +010045void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
Jenkins52ba29e2018-08-29 15:32:11 +000046 const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
Anthony Barbier8140e1e2017-12-14 23:48:46 +000047{
Anthony Barbier06ea0482018-02-22 15:45:35 +000048 ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
Jenkinsb3a371b2018-05-23 11:36:53 +010049 ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
Jenkins52ba29e2018-08-29 15:32:11 +000050 enable_fast_math, num_groups));
Anthony Barbier06ea0482018-02-22 15:45:35 +000051
Jenkinsb3a371b2018-05-23 11:36:53 +010052 switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info,
53 weights_info, act_info, CLScheduler::get().target(), dilation, enable_fast_math))
Anthony Barbier8140e1e2017-12-14 23:48:46 +000054 {
Jenkinsb3a371b2018-05-23 11:36:53 +010055 case ConvolutionMethod::WINOGRAD:
56 {
Jenkins52ba29e2018-08-29 15:32:11 +000057 ARM_COMPUTE_ERROR_ON(num_groups != 1);
Jenkinsb3a371b2018-05-23 11:36:53 +010058 auto f = arm_compute::support::cpp14::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
59 f->configure(input, weights, biases, output, conv_info, act_info, enable_fast_math);
60 _function = std::move(f);
61 break;
62 }
Anthony Barbier06ea0482018-02-22 15:45:35 +000063 case ConvolutionMethod::DIRECT:
Anthony Barbierf45d5a92018-01-24 16:23:15 +000064 {
Jenkins52ba29e2018-08-29 15:32:11 +000065 ARM_COMPUTE_ERROR_ON(num_groups != 1);
Anthony Barbier06ea0482018-02-22 15:45:35 +000066 auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
Jenkinsb3a371b2018-05-23 11:36:53 +010067 f->configure(input, weights, biases, output, conv_info, act_info);
Anthony Barbier06ea0482018-02-22 15:45:35 +000068 _function = std::move(f);
69 break;
Anthony Barbierf45d5a92018-01-24 16:23:15 +000070 }
Anthony Barbier06ea0482018-02-22 15:45:35 +000071 case ConvolutionMethod::GEMM:
Anthony Barbierf45d5a92018-01-24 16:23:15 +000072 {
Anthony Barbier06ea0482018-02-22 15:45:35 +000073 auto f = arm_compute::support::cpp14::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
Jenkins52ba29e2018-08-29 15:32:11 +000074 f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
Anthony Barbier06ea0482018-02-22 15:45:35 +000075 _function = std::move(f);
76 break;
Anthony Barbierf45d5a92018-01-24 16:23:15 +000077 }
Jenkins4ba87db2019-05-23 17:11:51 +010078 case ConvolutionMethod::FFT:
79 {
80 auto f = arm_compute::support::cpp14::make_unique<CLFFTConvolutionLayer>(_memory_manager);
81 f->configure(input, weights, biases, output, conv_info, act_info);
82 _function = std::move(f);
83 break;
84 }
Anthony Barbier06ea0482018-02-22 15:45:35 +000085 default:
86 ARM_COMPUTE_ERROR("Not supported.");
87 break;
Anthony Barbier8140e1e2017-12-14 23:48:46 +000088 }
89}
90
Anthony Barbier06ea0482018-02-22 15:45:35 +000091Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
Jenkins52ba29e2018-08-29 15:32:11 +000092 const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
Anthony Barbier871448e2017-03-24 14:54:29 +000093{
Anthony Barbier06ea0482018-02-22 15:45:35 +000094 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
Jenkins52ba29e2018-08-29 15:32:11 +000095 ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
Anthony Barbier8140e1e2017-12-14 23:48:46 +000096
Anthony Barbier06ea0482018-02-22 15:45:35 +000097 const GPUTarget gpu_target = CLScheduler::get().target();
Anthony Barbier871448e2017-03-24 14:54:29 +000098
Jenkinsb3a371b2018-05-23 11:36:53 +010099 switch(CLConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, act_info, gpu_target, dilation, enable_fast_math))
Anthony Barbier871448e2017-03-24 14:54:29 +0000100 {
Jenkinsb3a371b2018-05-23 11:36:53 +0100101 case ConvolutionMethod::WINOGRAD:
102 {
103 //Validate Winograd
Jenkins52ba29e2018-08-29 15:32:11 +0000104 ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLWinogradConvolutionLayer is not supported");
Jenkinsb3a371b2018-05-23 11:36:53 +0100105 ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
106 break;
107 }
Anthony Barbier06ea0482018-02-22 15:45:35 +0000108 case ConvolutionMethod::DIRECT:
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000109 {
Anthony Barbier06ea0482018-02-22 15:45:35 +0000110 // Validate direct convolution layer
Jenkins52ba29e2018-08-29 15:32:11 +0000111 ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLDirectConvolutionLayer is not supported");
Jenkinsb3a371b2018-05-23 11:36:53 +0100112 ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
Anthony Barbier06ea0482018-02-22 15:45:35 +0000113 break;
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000114 }
Anthony Barbier06ea0482018-02-22 15:45:35 +0000115 case ConvolutionMethod::GEMM:
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000116 {
Anthony Barbier06ea0482018-02-22 15:45:35 +0000117 // Validate gemm-based convolution layer
Jenkins52ba29e2018-08-29 15:32:11 +0000118 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups));
Anthony Barbier06ea0482018-02-22 15:45:35 +0000119 break;
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000120 }
Jenkins4ba87db2019-05-23 17:11:51 +0100121 case ConvolutionMethod::FFT:
122 {
123 // Validate FFT-based convolution layer
124 ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info));
125 break;
126 }
Anthony Barbier06ea0482018-02-22 15:45:35 +0000127 default:
128 ARM_COMPUTE_ERROR("Not supported.");
129 break;
Anthony Barbier871448e2017-03-24 14:54:29 +0000130 }
131
Anthony Barbier06ea0482018-02-22 15:45:35 +0000132 return Status{};
133}
Kaizen8938bd32017-09-28 14:38:23 +0100134
/** Heuristically select which convolution implementation to use.
 *
 * Selection order (first match wins):
 *  1. Exact match against a table of known network layer shapes (AlexNet, VGG, MobileNet).
 *  2. Any dilation != 1x1 forces GEMM (the only path accepting a dilation parameter).
 *  3. Large-image / large-kernel layers (SRGAN-like) that pass direct validation -> DIRECT.
 *  4. Big kernels with shrinking channel count that pass FFT validation -> FFT.
 *  5. Shallow inputs (< 16 input channels) -> GEMM.
 *  6. Otherwise WINOGRAD if it validates, else GEMM.
 *
 * @param[in] input            Source tensor info.
 * @param[in] weights          Weights tensor info.
 * @param[in] output           Destination tensor info.
 * @param[in] conv_info        Padding and stride information.
 * @param[in] weights_info     Weights transformation information (currently unused here).
 * @param[in] act_info         Fused activation information.
 * @param[in] gpu_target       GPU target (currently unused here).
 * @param[in] dilation         Kernel dilation.
 * @param[in] enable_fast_math Allow faster, possibly less accurate math (Winograd check).
 *
 * @return the selected @ref ConvolutionMethod
 */
ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                             const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation, bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
    // Kept in the signature for API stability; not used by the current heuristics.
    ARM_COMPUTE_UNUSED(weights_info);
    ARM_COMPUTE_UNUSED(gpu_target);

    // Resolve layout-dependent dimension indices (works for both NCHW and NHWC).
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info*/
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

    // Hand-tuned overrides for well-known network layers where benchmarking showed
    // a specific method to be the fastest regardless of the generic heuristics below.
    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

    // Predicate: does the current layer exactly match one of the known configurations?
    // Compares input spatial size, kernel size, IFM/OFM, all four paddings, stride and layout.
    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config = c.first;
        const PadStrideInfo info = std::get<3>(config);
        const DataLayout data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride() && (data_layout == input->data_layout());
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

    if(dilation != Size2D(1U, 1U))
    {
        // Only the GEMM path supports dilation.
        return ConvolutionMethod::GEMM;
    }
    else
    {
        // SRGAN
        // Very large spatial dims with a 9-tall kernel and small top padding: direct
        // convolution wins (when it validates) for SRGAN-style layers.
        if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
           && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::DIRECT;
        }
        // Big kernels (> 7) with a shrinking channel count favour the FFT path, if supported.
        if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::FFT;
        }
        // Shallow inputs: Winograd's transform overhead is not worth it.
        if(input->dimension(idx_c) < 16)
        {
            return ConvolutionMethod::GEMM;
        }
        // Prefer Winograd whenever this shape/datatype combination supports it.
        return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
    }
}
208
/** Run the selected convolution function.
 *
 * Calls prepare() first so any one-off setup the underlying function needs
 * is done before execution.
 */
void CLConvolutionLayer::run()
{
    prepare();
    _function->run();
}
Jenkinsb3a371b2018-05-23 11:36:53 +0100214
/** Prepare the function for execution by delegating to the convolution
 *  function selected in configure().
 *
 * NOTE(review): assumes configure() has been called (_function non-null) — as
 * with the other functions in this family, calling prepare()/run() first is a
 * programming error.
 */
void CLConvolutionLayer::prepare()
{
    _function->prepare();
}