/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ConvolutionLayer.h"

#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/UtilsQuantizedAsymm.h"

#include "tests/framework/Asserts.h"

#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
namespace
{
inline bool is_valid_pixel(int i, int min, int max)
{
    return (i >= min && i < max);
}

// 3D convolution for floating point type
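// Computes a single output element: a 2D convolution centred at input position (xi, yi)
// is evaluated for every input feature map and accumulated, then the bias is added.
// The *_offset arguments are element offsets into the flattened input, weights, bias
// and output tensors, selecting the batch, the output feature map and the output pixel.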
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value && is_floating_point<TB>::value, int >::type = 0 >
void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                   int i_offset, int w_offset, int b_offset, int o_offset,
                   int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
{
    const T  *in_ptr  = in.data() + i_offset;
    const T  *w_ptr   = weights.data() + w_offset;
    const TB *b_ptr   = bias.data() + b_offset;
    T        *out_ptr = out.data() + o_offset;

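    // Split the kernel extent around its centre tap. For even kernel sizes the window
    // is asymmetric, with one extra tap on the start side (e.g. a width of 4 covers
    // offsets [-2, 1]).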
    const int half_width_weights_start  = width_weights / 2;
    const int half_width_weights_end    = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
    const int half_height_weights_start = height_weights / 2;
    const int half_height_weights_end   = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;

    // Reset accumulator
    T acc(0);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
        {
            for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
            {
                // Check if the pixel is out of bounds
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights_start;
                    const int idy = yk + half_height_weights_start;

                    const T i_value = in_ptr[offset_slice_in + xk + yk * width_in];
                    const T w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];

                    acc += i_value * w_value;
                }
            }
        }
    }

    // Accumulate the bias and store the result
    *out_ptr = acc + (*b_ptr);
}

// 3D convolution for fixed point type
template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value && std::is_integral<TB>::value, int >::type = 0 >
void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                   int i_offset, int w_offset, int b_offset, int o_offset,
                   int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
{
    const T  *in_ptr               = in.data() + i_offset;
    const T  *w_ptr                = weights.data() + w_offset;
    const TB *b_ptr                = bias.data() + b_offset;
    T        *out_ptr              = out.data() + o_offset;
    int       fixed_point_position = in.fixed_point_position();

    const int half_width_weights_start  = width_weights / 2;
    const int half_width_weights_end    = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
    const int half_height_weights_start = height_weights / 2;
    const int half_height_weights_end   = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;

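    // Accumulate in a promoted (wider) fixed point type to reduce the risk of
    // intermediate overflow, then convert back to T once the bias has been added.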
    using namespace fixed_point_arithmetic;
    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;

    // Reset accumulator
    fixed_point<promoted_type> acc(0, fixed_point_position);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
        {
            for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
            {
                // Check if the pixel is out of bounds
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights_start;
                    const int idy = yk + half_height_weights_start;

                    const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
                    const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
                    const fixed_point<promoted_type> iw = i_value * w_value;
                    acc = iw + acc;
                }
            }
        }
    }

    // Get the bias
    const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true);

    // Accumulate the bias and convert back
    acc = acc + b;
    fixed_point<T> res(acc);
    *out_ptr = res.raw();
}

// 3D convolution for QASYMM8 type
template <>
void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out,
                   int i_offset, int w_offset, int b_offset, int o_offset,
                   int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
{
    const uint8_t *in_ptr  = in.data() + i_offset;
    const uint8_t *w_ptr   = weights.data() + w_offset;
    const int32_t *b_ptr   = bias.data() + b_offset;
    uint8_t       *out_ptr = out.data() + o_offset;

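    // Negate the quantization offsets (zero points) so that the de-offset values
    // (q - zero_point) can be formed by a plain addition in the inner loop below.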
    const int   input_offset   = -in.quantization_info().offset;
    const float input_scale    = in.quantization_info().scale;
    const int   weights_offset = -weights.quantization_info().offset;
    const float weights_scale  = weights.quantization_info().scale;
    const int   output_offset  = out.quantization_info().offset;
    const float output_scale   = out.quantization_info().scale;

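    // The int32 accumulator is requantized with the effective scale
    // multiplier = input_scale * weights_scale / output_scale, decomposed into an
    // integer multiplier and a right shift (the multiplier is expected to be less
    // than one for convolution).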
    int         output_multiplier = 0;
    int         output_shift      = 0;
    const float multiplier        = input_scale * weights_scale / output_scale;
    arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

    const int half_width_weights_start  = width_weights / 2;
    const int half_width_weights_end    = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
    const int half_height_weights_start = height_weights / 2;
    const int half_height_weights_end   = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;

    // Reset accumulator
    int32_t acc(0);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
        {
            for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
            {
                // Check if the pixel is out of bounds
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights_start;
                    const int idy = yk + half_height_weights_start;

                    const uint8_t i_value = in_ptr[offset_slice_in + xk + yk * width_in];
                    const uint8_t w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];

                    acc += (i_value + input_offset) * (w_value + weights_offset);
                }
            }
        }
    }

    // Accumulate the bias
    acc += (*b_ptr);

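    // Requantize to the output scale: fixed point multiply by output_multiplier,
    // rounding right shift by output_shift, then add the output zero point and
    // clamp to the valid QASYMM8 range [0, 255].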
    acc = asymm_rounding_divide_by_pow2(asymm_int_mult(acc, output_multiplier), output_shift);
    acc += output_offset;
    acc = utility::clamp<int32_t>(acc, 0, 255);

    // Store the result
    *out_ptr = acc;
}
} // namespace

template <typename T, typename TB>
SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
{
    // Create reference
    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };

    // Compute reference
    const int width_in       = src.shape().x();
    const int height_in      = src.shape().y();
    const int depth_in       = src.shape().z();
    const int width_out      = dst.shape().x();
    const int height_out     = dst.shape().y();
    const int depth_out      = dst.shape().z();
    const int width_weights  = weights.shape().x();
    const int height_weights = weights.shape().y();
    const int depth_weights  = weights.shape().z();
    const int pad_left       = info.pad_left();
    const int pad_top        = info.pad_top();
    const int stride_xi      = info.stride().first;
    const int stride_yi      = info.stride().second;

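    // Output spatial dimensions derived from the input size, kernel size and
    // pad/stride configuration; used below to bound the sampling loops.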
    auto output_wh = scaled_dimensions(width_in, height_in, width_weights, height_weights, info);

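    // The convolution is iterated in input coordinates: start_xi/start_yi is the
    // kernel centre of the first output element (shifted left/up by the padding),
    // and end_xi/end_yi is the extent covered when stepping by the stride once per
    // output element.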
    const int start_xi    = width_weights / 2 - pad_left;
    const int start_yi    = height_weights / 2 - pad_top;
    const int end_xi      = output_wh.first * stride_xi;
    const int end_yi      = output_wh.second * stride_yi;
    const int num_batches = src.shape().total_size() / (width_in * height_in * depth_in);

    for(int r = 0; r < num_batches; ++r)
    {
        for(int yi = start_yi; yi < start_yi + end_yi; yi += stride_yi)
        {
            for(int xi = start_xi; xi < start_xi + end_xi; xi += stride_xi)
            {
                for(int ofm = 0; ofm < depth_out; ++ofm)
                {
                    // Compute input and output offsets
                    const int offset_in  = r * width_in * height_in * depth_in;
                    const int xo         = (xi - start_xi) / stride_xi;
                    const int yo         = (yi - start_yi) / stride_yi;
                    const int offset_out = xo + yo * width_out + ofm * width_out * height_out + r * width_out * height_out * depth_out;

                    ARM_COMPUTE_ASSERT(xo < width_out);
                    ARM_COMPUTE_ASSERT(yo < height_out);

                    // Compute 3D convolution
                    convolution3d(src, weights, bias, dst,
                                  offset_in, ofm * width_weights * height_weights * depth_weights, ofm, offset_out,
                                  xi, yi,
                                  width_in, height_in, depth_in,
                                  width_weights, height_weights);
                }
            }
        }
    }

    return dst;
}

template SimpleTensor<float> convolution_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &output_shape,
                                               const PadStrideInfo &info);
template SimpleTensor<half> convolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
                                              const PadStrideInfo &info);
template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &output_shape,
                                                 const PadStrideInfo &info);
template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info);
template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                 const PadStrideInfo &info);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute