/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
25
Kaizen8938bd32017-09-28 14:38:23 +010026#include "arm_compute/core/Size2D.h"
Anthony Barbier871448e2017-03-24 14:54:29 +000027#include "arm_compute/core/Validate.h"
Anthony Barbierf45d5a92018-01-24 16:23:15 +000028#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Anthony Barbier8140e1e2017-12-14 23:48:46 +000029#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Anthony Barbier871448e2017-03-24 14:54:29 +000030#include "arm_compute/runtime/CL/CLScheduler.h"
Kaizen8938bd32017-09-28 14:38:23 +010031#include "support/ToolchainSupport.h"
Anthony Barbier871448e2017-03-24 14:54:29 +000032
Anthony Barbiera4376382017-04-12 15:12:46 +010033#include <algorithm>
Anthony Barbiera4376382017-04-12 15:12:46 +010034
Anthony Barbier871448e2017-03-24 14:54:29 +000035using namespace arm_compute;
Anthony Barbierf45d5a92018-01-24 16:23:15 +000036using namespace arm_compute::misc::shape_calculator;
37
38namespace
39{
Jenkinsb3a371b2018-05-23 11:36:53 +010040Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
Anthony Barbierf45d5a92018-01-24 16:23:15 +000041{
Anthony Barbierf45d5a92018-01-24 16:23:15 +000042 if(is_data_type_quantized_asymmetric(input.data_type()))
43 {
        // Since we need negative offsets for the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           nullptr,
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _convert_weights(), _flatten_layer(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
      _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, nullptr, output);

        // Restore the original QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */, 0, false, retain_internal_weights));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten).set_data_layout(DataLayout::NCHW));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_layer.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output, retain_internal_weights);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output, retain_internal_weights);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _is_prepared           = fc_info.retain_internal_weights;
    _original_weights      = weights;

    // Configure gemmlowp output
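    // The quantized path accumulates into a temporary S32 tensor; the output stage configured further down requantizes it to the final QASYMM8 output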
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    const ICLTensor *weights_to_use = weights;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
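    // For batched execution the input comes from a convolution when its dimensions from index 3 onwards (the batches)
    // match the output dimensions from index 1 onwards, i.e. the first three input dimensions still carry the
    // un-flattened shape of the previous layer's output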
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    // Configure fc core
    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
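        // Requantization: the effective scale (input_scale * weights_scale / output_scale) is decomposed into a
        // normalized fixed-point multiplier and a right shift so the S32 accumulators can be scaled back to QASYMM8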
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

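    // Build TensorInfo descriptors for the intermediate tensors so each stage can be validated without allocating memory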
    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayer::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        _flatten_layer.run();
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

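        // Helper that frees a temporary weights tensor once nothing references it anymore; the command queue is
        // finished first so no in-flight kernel is still reading the buffer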
        auto release_unused = [](CLTensor *w)
        {
            if(!w->is_used())
            {
                CLScheduler::get().queue().finish();
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ICLTensor *cur_weights = _original_weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_kernel.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}