/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _function()
{
}

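// A minimal construction sketch (an illustrative assumption, not part of this
// file): callers typically share one memory manager across functions so that
// scratch buffers can be reused. MemoryManagerOnDemand, BlobLifetimeManager and
// PoolManager are the stock runtime implementations; pool setup/finalization
// steps are omitted here.
//
//     auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
//     auto pool_mgr     = std::make_shared<PoolManager>();
//     auto memory_mgr   = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
//     CLConvolutionLayer conv(memory_mgr);
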
void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                            enable_fast_math, num_groups));

    switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info,
                                                      weights_info, act_info, CLScheduler::get().target(), dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = arm_compute::support::cpp14::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
            f->configure(input, weights, biases, output, conv_info, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
            f->configure(input, weights, biases, output, conv_info, act_info);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            auto f = arm_compute::support::cpp14::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
            f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
            _function = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }
}

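// A minimal usage sketch (assumptions: shapes/types are illustrative and the
// trailing configure() parameters use the defaults declared in the header).
// This mirrors the VGG16 first layer listed in the known configurations below,
// so the DIRECT path would be selected for an NCHW input:
//
//     CLScheduler::get().default_init();
//     CLTensor src, weights, biases, dst;
//     src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
//     weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 64U), 1, DataType::F32));
//     biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
//     dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));
//
//     CLConvolutionLayer conv;
//     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
//     // ... allocate the tensors, fill src/weights/biases, then conv.run();
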
Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");

    const GPUTarget gpu_target = CLScheduler::get().target();

    switch(CLConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, act_info, gpu_target, dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            // Validate Winograd convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLWinogradConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            // Validate direct convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLDirectConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            // Validate GEMM-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups));
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    return Status{};
}

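// A sketch of the usual validate-before-configure pattern (illustrative only;
// configure() above performs the same check internally, and Status converts to
// bool as used in get_convolution_method() below):
//
//     const Status status = CLConvolutionLayer::validate(src.info(), weights.info(),
//                                                        biases.info(), dst.info(), conv_info);
//     if(bool(status))
//     {
//         conv.configure(&src, &weights, &biases, &dst, conv_info);
//     }
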
ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                             const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation, bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(weights_info);
    ARM_COMPUTE_UNUSED(gpu_target);

    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224 (NCHW)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160 (NCHW)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224 (NHWC)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160 (NHWC)
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config      = c.first;
        const PadStrideInfo            info        = std::get<3>(config);
        const DataLayout               data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride() && (data_layout == input->data_layout());
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

    // Fall back to GEMM for dilated convolutions and shallow inputs; otherwise prefer Winograd whenever it validates
    if(dilation != Size2D(1U, 1U) || (input->dimension(idx_c) < 16))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
    }
}

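// The same heuristic can be queried up front, e.g. to log which path will be
// taken (a sketch; all arguments other than the tensor infos use neutral
// defaults here):
//
//     const ConvolutionMethod method = CLConvolutionLayer::get_convolution_method(
//         src.info(), weights.info(), dst.info(), conv_info, WeightsInfo(),
//         ActivationLayerInfo(), CLScheduler::get().target(), Size2D(1U, 1U), false);
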
void CLConvolutionLayer::run()
{
    // Ensure the selected function has been prepared (e.g. weights transformed) before the first run
    prepare();
    _function->run();
}

void CLConvolutionLayer::prepare()
{
    _function->prepare();
}