/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _im2col_kernel(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
      _im2col_output(), _gemmlowp_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, output);

        // Restore the original QuantizationInfo as the input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for im2col
    TensorShape shape_im2col = compute_im2col_fc_shape(input->info());
    _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));

    // Configure im2col kernel
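    // Note: with a 1x1 kernel, unit stride and no padding, im2col reduces to flattening each input volume into a single vector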
    _memory_group.manage(&_im2col_output);
    _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);

    // Configure matrix multiply kernel
    configure_mm(&_im2col_output, weights, output);

    // Allocate the output tensor for im2col once all the configure methods have been called
    _im2col_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights, bool are_weights_reshaped)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               transpose_weights,
                                                               are_weights_reshaped));

    _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    _is_fc_after_conv     = true;
    _accumulate_biases    = false;
    _is_quantized         = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights     = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    // With the Fully Connected layer we can have 4 different cases:
    // 1) Convolution layer -> Fully Connected layer without batches
    // 2) Fully Connected layer -> Fully Connected layer without batches
    // 3) Convolution layer -> Fully Connected layer with batches
    // 4) Fully Connected layer -> Fully Connected layer with batches
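    // In cases 1) and 3) the input comes from a convolution and is flattened with im2col before the
    // matrix multiplication; in cases 2) and 4) the input can be multiplied as it is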

    const ICLTensor *weights_to_use = weights;

    if(!_are_weights_reshaped)
    {
        weights_to_use = &_reshape_weights_output;

        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
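        // The input comes from a convolution if all its dimensions beyond the first three (the ones collapsed by im2col)
        // match the output dimensions beyond the first one, i.e. the remaining dimensions are the batches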
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
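        // The GEMMLowp result is requantized with multiplier = (input_scale * weights_scale) / output_scale,
        // decomposed below into a fixed-point multiplier and a right shift so the output stage only uses integer arithmetic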
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights, bool are_weights_reshaped)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

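    // Tensor info objects for the intermediate results; they are only used to validate the internal stages and no memory is allocated for them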
    const ITensorInfo &im2col_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)));
    const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &gemmlowp_output  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    // 1) Convolution layer -> Fully Connected layer without batches
    // 2) Fully Connected layer -> Fully Connected layer without batches
    // 3) Convolution layer -> Fully Connected layer with batches
    // 4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate im2col kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false));
        input_to_use = &im2col_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    prepare();

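    // Acquire the memory managed by the memory group (e.g. the transient im2col output) for the duration of this run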
    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        CLScheduler::get().enqueue(_im2col_kernel, false);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Apply the output stage for quantized types or accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    // Reshape of the weights (happens only once)
    if(!_are_weights_reshaped)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run reshape weights kernel and mark weights as unused
        _reshape_weights_output.allocator()->allocate();
        _reshape_weights_kernel.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM and release the reshaped weights if they are no longer needed
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
            if(!_reshape_weights_output.is_used())
            {
                _reshape_weights_output.allocator()->free();
            }
        }

        CLScheduler::get().queue().finish();
        _are_weights_reshaped = true;
    }
}