/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "utils/TypePrinter.h"

#include <memory>
#include <tuple>

namespace arm_compute
{
namespace
{
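/** Compute the start and end coordinates of the slice that crops the deconvolution padding
 *  away from the reshaped output.
 *
 *  In NCHW the spatial dimensions sit at indices 0 (width) and 1 (height); in NHWC the
 *  channel dimension occupies index 0, so the crop moves to indices 1 and 2. For example,
 *  in NCHW with a padding of 1 on every side and a 6x6 output, the returned window is
 *  start = (1, 1), end = (5, 5).
 */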
std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
{
    Coordinates start;
    Coordinates end;

    if(is_nchw)
    {
        start.set(0, deconv_info.pad_left());
        start.set(1, deconv_info.pad_top());
        end.set(0, output_info.dimension(0) - deconv_info.pad_right());
        end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
    }
    else
    {
        start.set(0, 0);
        start.set(1, deconv_info.pad_left());
        start.set(2, deconv_info.pad_top());

        end.set(0, output_info.dimension(0));
        end.set(1, output_info.dimension(1) - deconv_info.pad_right());
        end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
    }

    return { start, end };
}
} // namespace

CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _mm_gemm(),
      _mm_gemmlowp(),
      _gemmlowp_output_stage(),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _reshape_weights(),
      _transpose_weights(),
      _deconv_reshape(),
      _slice_gemm(),
      _gemmlowp_final(),
      _reshaped_weights(),
      _reshaped_weights_t(),
      _permuted_input(),
      _permuted_weights(),
      _gemm_output(),
      _slice_gemm_input(),
      _original_weights(),
      _is_prepared(false),
      _padded_input(false),
      _is_nchw(false),
      _is_quantized(false)
{
}

Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);

    DataLayout data_layout  = input->data_layout();
    const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    const bool is_nchw      = input->data_layout() == DataLayout::NCHW;
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

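    // The GEMM-based deconvolution only supports kernels whose size equals the stride in each
    // spatial dimension (non-overlapping deconvolution windows).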
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);

    TensorShape nhwc_weights_shape = weights->tensor_shape();
    TensorShape nhwc_input_shape   = input->tensor_shape();

    if(is_nchw)
    {
        permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
        permute(nhwc_input_shape, PermutationVector(2, 0, 1));

        TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);

        TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);

        CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
        CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
    }

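    // Flatten the (NHWC) weights to a 2D matrix of shape [IFM, kernel_w * kernel_h * OFM],
    // then transpose it for use as the right-hand side of the GEMM.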
    const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
    const TensorInfo  reshaped_info  = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
    ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));

    TensorShape      transposed_shape(reshaped_shape[1], reshaped_shape[0]);
    const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));

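    // Each input position produces kernel_w * kernel_h * OFM values, giving a GEMM output of
    // shape [kernel_w * kernel_h * OFM, input_w, input_h, batches] for the reshape kernel to
    // scatter back into spatial form.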
    TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
                                  input->dimension(idx_w),
                                  input->dimension(idx_h),
                                  input->dimension(idx_b));

    TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
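    // GEMM configuration: no pre-reshaped inputs, reshape the RHS only on the first run,
    // output depth of the 3D GEMM set to the input height, and input reinterpreted as 3D.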
    GEMMInfo gemm_info(false, false, true, input->dimension(idx_h), true);

    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
    }

    const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
    auto                out_dims           = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h), stride_info);
    const TensorShape   deconv_shape       = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
    TensorInfo          col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);

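    // Depending on input padding and quantization, the GEMM output passes through up to three
    // further stages: the deconvolution reshape, a quantized output stage, and a slice that
    // crops the padded border.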
    if(padded_input && is_quantized)
    {
        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr,
                                                                                                  &col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8), output, start_end.first, start_end.second));
    }
    else if(padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
    }
    else if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr, output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
    }

    return Status{};
}

void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
                                                                  weights->info(),
                                                                  bias != nullptr ? bias->info() : nullptr,
                                                                  output->info(),
                                                                  deconv_info));

    _original_weights = weights;
    _padded_input     = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    _is_nchw          = input->info()->data_layout() == DataLayout::NCHW;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());

    const ICLTensor *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;

    // If the data layout is NCHW, transform everything to NHWC. An alternative would be to
    // do an outer product in NCHW and then accumulate through a reduction. This has two
    // drawbacks: first, the outer product is less efficient than a full GEMM; second, the
    // reduction might be slower than the GEMM itself.
    if(_is_nchw)
    {
        _memory_group.manage(&_permuted_input);
        _permute_input_to_nhwc.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));

        _permute_weights_to_nhwc.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

    // Reshape the input weights. The weights are reshaped only once, during the call to prepare().
    _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
                                                               weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
                                                   1,
                                                   input->info()->data_type(), weights->info()->quantization_info()));

    _reshape_weights.configure(weights_to_use, &_reshaped_weights);
    _transpose_weights.configure(&_reshaped_weights, &_reshaped_weights_t);

    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
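    // Same GEMM configuration as used (and validated) in validate()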
    GEMMInfo gemm_info(false, false, true, input->info()->dimension(idx_h), true);

    // Configure the matrix multiply; quantized types go through the low-precision GEMM path
    if(_is_quantized)
    {
        _mm_gemmlowp.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);
    }
    else
    {
        _mm_gemm.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
    }

    if(_is_nchw)
    {
        _permuted_input.allocator()->allocate();
    }

    ICLTensor *deconv_reshape_output = nullptr;
    ICLTensor *slice_output          = nullptr;
    ICLTensor *output_stage_output   = nullptr;

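    // Route the intermediate tensors between the reshape, output-stage and slice stages;
    // whichever stage runs last writes directly into the user's output tensor.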
    if(_padded_input && _is_quantized)
    {
        _memory_group.manage(&_slice_gemm_input);
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_padded_input)
    {
        _memory_group.manage(&_slice_gemm_input);
        deconv_reshape_output = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_is_quantized)
    {
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = output;
    }
    else
    {
        deconv_reshape_output = output;
    }

    // Configure the deconvolution reshape kernel (a col2im-like scatter of the GEMM output into spatial form)
    _deconv_reshape.configure(&_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
    _gemm_output.allocator()->allocate();

    if(_is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

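        // Requantize the S32 accumulators: the effective scale is
        // (input_scale * weights_scale) / output_scale, expressed as a fixed-point
        // multiplier plus a right shift.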
        float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
        int   output_multiplier(0);
        int   output_shift(0);
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_final, nullptr, output_stage_output, output_multiplier, output_shift, oq_info.offset);
        _gemmlowp_final.allocator()->allocate();
    }

    // If the input was padded, the output needs to be sliced.
    if(_padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
        _slice_gemm.configure(&_slice_gemm_input, slice_output, start_end.first, start_end.second);
        _slice_gemm_input.allocator()->allocate();
    }
}

void CLGEMMDeconvolutionLayer::run()
{
    prepare();

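    // Acquire the memory group so the transient tensors are backed by memory for the duration of run()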
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_nchw)
    {
        _permute_input_to_nhwc.run();
    }

    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    CLScheduler::get().enqueue(_deconv_reshape, false);

    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }

    if(_padded_input)
    {
        _slice_gemm.run();
    }
}

void CLGEMMDeconvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

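        // Transform the weights once: permute to NHWC if needed, flatten to 2D, then transpose.
        // The NHWC copy is freed as soon as it has been reshaped.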
        if(_is_nchw)
        {
            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
        }

        _reshaped_weights.allocator()->allocate();
        _reshape_weights.run();

        if(_is_nchw)
        {
            _permuted_weights.allocator()->free();
        }

        _reshaped_weights_t.allocator()->allocate();
        _transpose_weights.run();

        // Prepare gemm
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }
        else
        {
            _mm_gemmlowp.prepare();
        }

        // Free resources
        if(!_reshaped_weights_t.is_used())
        {
            _reshaped_weights_t.allocator()->free();
        }

        _original_weights->mark_as_unused();
        _is_prepared = true;
    }
}
} // namespace arm_compute