blob: f1f9b59330ab86544265d4f2f0e3d4a377ba2532 [file] [log] [blame]
Kaizen8938bd32017-09-28 14:38:23 +01001/*
Jenkins514be652019-02-28 12:25:18 +00002 * Copyright (c) 2017-2019 ARM Limited.
Kaizen8938bd32017-09-28 14:38:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
Jenkins514be652019-02-28 12:25:18 +000021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
Kaizen8938bd32017-09-28 14:38:23 +010022 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000028#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
Kaizen8938bd32017-09-28 14:38:23 +010029#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/LargeConvolutionLayerDataset.h"
32#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000033#include "tests/datasets/TinyConvolutionLayerDataset.h"
Kaizen8938bd32017-09-28 14:38:23 +010034#include "tests/framework/Asserts.h"
35#include "tests/framework/Macros.h"
36#include "tests/framework/datasets/Datasets.h"
37#include "tests/validation/Validation.h"
38#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
39
40namespace arm_compute
41{
42namespace test
43{
44namespace validation
45{
namespace
{
// Tolerances used by the validation checks in the test cases below.
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.1f);              /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);             /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;            /**< Tolerance number */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
});

/** Grouped CNN data types (the grouped convolution tests below only cover float types) */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

/** Full set of activation functions — used by the NIGHTLY (RunLarge) test cases below. */
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
/** Reduced activation set — used by the PRECOMMIT (RunSmall) test cases to keep precommit time down. */
const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace
82
TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

// *INDENT-OFF*
// clang-format off
/** Checks that CLConvolutionLayer::get_convolution_method() selects the expected
 *  convolution algorithm (GEMM or WINOGRAD) for each combination of input/weights/
 *  output infos, conv info, GPU target, dilation and fast-math flag.
 *
 *  NOTE: the zipped columns below are positional — entry i of every column forms
 *  one test case, so the columns must stay aligned.
 */
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
                                          framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),      // Select GEMM
                                                                                  TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),      // Select GEMM
                                                                                  TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),  // Select GEMM
                                                                                  TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
                                                                                  TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),    // Select GEMM
                                                                                  TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),  // Select GEMM
                                                                                  TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),     // Select WINOGRAD
                                                                                  TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)       // Select GEMM
                                          }),
                                          framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                                                                    TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
                                          })),
                                          framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
                                          })),
                                          framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                                                 PadStrideInfo(1, 2, 1, 1),
                                                                                 PadStrideInfo(1, 1, 0, 0),
                                                                                 PadStrideInfo(1, 1, 0, 0),
                                                                                 PadStrideInfo(2, 1, 0, 0),
                                                                                 PadStrideInfo(3, 2, 1, 0),
                                                                                 PadStrideInfo(1, 1, 2, 2),
                                                                                 PadStrideInfo(1, 1, 2, 2)
                                          })),
                                          framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                                                                  GPUTarget::MIDGARD,
                                                                                  GPUTarget::G71,
                                                                                  GPUTarget::G71,
                                                                                  GPUTarget::MIDGARD,
                                                                                  GPUTarget::BIFROST,
                                                                                  GPUTarget::BIFROST,
                                                                                  GPUTarget::BIFROST
                                          })),
                                          framework::dataset::make("Dilation", { Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(1U, 1U),
                                                                                 Size2D(2U, 1U), // non-unit dilation: expected GEMM even though fast math is enabled for this case
                                          })),
                                          framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
                                          framework::dataset::make("Expected",{ ConvolutionMethod::GEMM,
                                                                                ConvolutionMethod::GEMM,
                                                                                ConvolutionMethod::GEMM,
                                                                                ConvolutionMethod::WINOGRAD,
                                                                                ConvolutionMethod::GEMM,
                                                                                ConvolutionMethod::GEMM,
                                                                                ConvolutionMethod::WINOGRAD,
                                                                                ConvolutionMethod::GEMM,
                                          })),
                                          input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    // Query the method-selection heuristic only; no kernels are configured or run.
    // Cloned infos are marked resizable so the query cannot mutate the dataset entries.
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // ConvolutionLayer
Anthony Barbier06ea0482018-02-22 15:45:35 +0000168
TEST_SUITE(GEMMConvolutionLayer)

/** Configuration-only test: creates tensors for every small conv shape / data type /
 *  activation combination, configures CLGEMMConvolutionLayer, and checks that the
 *  tensors' valid regions match their shapes and that configure() does not alter
 *  the src/weights quantization info. Nothing is executed on the device here.
 */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                           CNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Quantized asymmetric convolutions use S32 biases; float types keep their own type.
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors (the quantization info is only meaningful for QASYMM8 but is harmless for float types)
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    // All tensors must still be resizable before configuration (no allocation yet)
    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Snapshot quantization info so we can verify configure() leaves it untouched
    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

/** Fixture running CLGEMMConvolutionLayer against the reference implementation for element type T. */
template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Kaizen8938bd32017-09-28 14:38:23 +0100217
TEST_SUITE(Float)
TEST_SUITE(FP16)

/** Precommit FP16: reduced shape dataset and reduced activation set, both layouts. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                                                                                                         framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                 framework::dataset::make("DataType",
                                                                                                                                                          DataType::F16)),
                                                                                                                         framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                 ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

/** Nightly FP16: full small + large datasets, full activation set, both layouts. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType",
                                                                        DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)

/** Precommit FP32: reduced shape dataset and reduced activation set, both layouts. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                                                                                                          framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                  framework::dataset::make("DataType",
                                                                                                                                                           DataType::F32)),
                                                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                  ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

/** Nightly FP32: full small + large datasets, full activation set, both layouts. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType",
                                                                        DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output: relative tolerance with an absolute-tolerance floor for values near zero
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000271
/** Fixture running CLGEMMConvolutionLayer against the quantized reference implementation for element type T. */
template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

/** Full activation set for the quantized path — used by the NIGHTLY (RunLarge) case below. */
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
/** Reduced activation set for the quantized path — used by the PRECOMMIT (RunSmall) case below. */
const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)

/** (scale, offset) pairs exercised by the QASYMM8 test cases. */
const auto QuantizationData = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(0.5f, 10),
    QuantizationInfo(0.3f, 3),
    QuantizationInfo(1.f, 10),
});

/** Precommit QASYMM8: reduced shapes, reduced activations, both layouts, all quantization pairs. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
/** Nightly QASYMM8: full small + large datasets, full quantized activation set, both layouts. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer
323
/** Fixture running CLGEMMConvolutionLayer with grouped convolutions against the reference for element type T. */
template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

/** Configuration-only test for grouped convolution: derives the group count from the
 *  shapes, configures CLGEMMConvolutionLayer with num_groups, and checks the tensors'
 *  valid regions. Nothing is executed on the device here.
 */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                           GroupedCNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Input channels must be an exact multiple of the weights' input channels
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is calculated dividing the number of input channels of the input tensor by the number of input channels of the weights shape
    const int num_groups = input_shape[2] / weights_shape[2];

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    // All tensors must still be resizable before configuration (no allocation yet)
    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

TEST_SUITE(Float)
TEST_SUITE(FP32)

/** Precommit grouped FP32: small grouped dataset, reduced activations, NCHW only. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                                                                                                 framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                         framework::dataset::make("DataType", DataType::F32)),
                                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                         ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

/** Nightly grouped FP32: small + large grouped datasets, full activation set, NCHW only. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)

/** Precommit grouped FP16: small grouped dataset, reduced activations, NCHW only. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                                                                                                framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                        framework::dataset::make("DataType", DataType::F16)),
                                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                        ActivationFunctionsSmallDataset))
{
    // Validate output
    // NOTE(review): grouped FP16 validates against tolerance_f32 (not tolerance_f16) — confirm this is intentional
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

/** Nightly grouped FP16: small + large grouped datasets, full activation set, NCHW only. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    // NOTE(review): grouped FP16 validates against tolerance_f32 (not tolerance_f16) — confirm this is intentional
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GroupedGEMMConvolutionLayer
TEST_SUITE_END() // CL
Kaizen8938bd32017-09-28 14:38:23 +0100421} // namespace validation
422} // namespace test
423} // namespace arm_compute