blob: 2cb704f6091594eddbb1b75b018e391dd1d05f6e [file] [log] [blame]
Kaizen8938bd32017-09-28 14:38:23 +01001/*
Anthony Barbierf45d5a92018-01-24 16:23:15 +00002 * Copyright (c) 2017-2018 ARM Limited.
Kaizen8938bd32017-09-28 14:38:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000028#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
Kaizen8938bd32017-09-28 14:38:23 +010029#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/LargeConvolutionLayerDataset.h"
32#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000033#include "tests/datasets/TinyConvolutionLayerDataset.h"
Kaizen8938bd32017-09-28 14:38:23 +010034#include "tests/framework/Asserts.h"
35#include "tests/framework/Macros.h"
36#include "tests/framework/datasets/Datasets.h"
37#include "tests/validation/Validation.h"
38#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
39
40namespace arm_compute
41{
42namespace test
43{
44namespace validation
45{
namespace
{
/* Tolerances used when comparing the reference implementation's output against the CL backend's output. */
constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);     /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.05f);                  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(0.0);               /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number: fraction of mismatching elements allowed by validate() */

/** CNN data types exercised by the non-grouped Configuration test */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
});

/** Grouped CNN data types (quantized grouped convolution is not exercised here) */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

/** Activation functions fused into the convolution in the float tests */
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace
77
TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

/** Checks that CLConvolutionLayer::get_convolution_method() selects the expected
 *  internal implementation (GEMM vs. WINOGRAD) for each combination of tensor
 *  infos, conv info, GPU target, dilation and fast-math flag below.
 *  The eight dataset columns are zipped element-wise, so row i of every
 *  dataset forms one test configuration. */
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
                                                                           framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),
                                                                                                                   TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)
                                                                                                                 }),
                                                                           framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                                                                                                     TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                                                                                                     TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
                                                                                                                   })),
                                                                           framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                                                                                    TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
                                                                                                                  })),
                                                                           framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                                                                                  PadStrideInfo(1, 2, 1, 1),
                                                                                                                  PadStrideInfo(1, 1, 0, 0),
                                                                                                                  PadStrideInfo(1, 1, 0, 0),
                                                                                                                  PadStrideInfo(2, 1, 0, 0),
                                                                                                                  PadStrideInfo(3, 2, 1, 0),
                                                                                                                  PadStrideInfo(1, 1, 2, 2),
                                                                                                                  PadStrideInfo(1, 1, 2, 2)
                                                                                                                })),
                                                                           framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                                                                                                   GPUTarget::MIDGARD,
                                                                                                                   GPUTarget::G71,
                                                                                                                   GPUTarget::G71,
                                                                                                                   GPUTarget::MIDGARD,
                                                                                                                   GPUTarget::BIFROST,
                                                                                                                   GPUTarget::BIFROST,
                                                                                                                   GPUTarget::BIFROST
                                                                                                                 })),
                                                                           framework::dataset::make("Dilation",
{
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(2U, 1U),
})),
framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
framework::dataset::make("Expected",
{
    ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM,
})),
input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    // Query the method-selection heuristic with resizable clones so the original
    // infos are not mutated; no weights reshape or fused activation is requested.
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
156
TEST_SUITE(GEMMConvolutionLayer)

/** Configures CLGEMMConvolutionLayer over the small + large datasets for every
 *  CNN data type and activation, and checks that configuration alone neither
 *  resizes the tensors' valid regions nor alters their quantization info. */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                                           CNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Quantized asymmetric convolutions accumulate into S32 biases.
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Snapshot quantization infos before configure() to verify they are preserved.
    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    // NOTE(review): no padding assertion is currently performed here — TODO confirm
    // whether a PaddingCalculator-based check was intended (PaddingCalculator.h is included).
}

/** Fixture running CLGEMMConvolutionLayer against the reference implementation */
template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Kaizen8938bd32017-09-28 14:38:23 +0100204
TEST_SUITE(Float)
TEST_SUITE(FP16)

/** F16 precommit run: small dataset, both NCHW and NHWC layouts, all fused activations. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                 framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                 framework::dataset::make("DataType",
                                                                                                                                          DataType::F16)),
                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                 ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

/** F16 nightly run: large dataset, both NCHW and NHWC layouts, all fused activations. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                                                                               framework::dataset::make("DataType",
                                                                                                                                        DataType::F16)),
                                                                                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16
Kaizen8938bd32017-09-28 14:38:23 +0100230
TEST_SUITE(FP32)

/** F32 precommit run: small dataset, both NCHW and NHWC layouts, all fused activations. */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                  framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                  framework::dataset::make("DataType",
                                                                                                                                           DataType::F32)),
                                                                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                  ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

/** F32 nightly run: large dataset; the absolute tolerance is supplied as well so
 *  near-zero outputs are not rejected by the relative tolerance alone. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                framework::dataset::make("DataType",
                                                                                                                                         DataType::F32)),
                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000256
257template <typename T>
Anthony Barbier06ea0482018-02-22 15:45:35 +0000258using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000259
Jenkinsb3a371b2018-05-23 11:36:53 +0100260const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
261{
262 ActivationLayerInfo(),
263 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
264 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
265});
266
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000267TEST_SUITE(Quantized)
268TEST_SUITE(QASYMM8)
Jenkinsb3a371b2018-05-23 11:36:53 +0100269
Jenkins52ba29e2018-08-29 15:32:11 +0000270FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000271 framework::dataset::make("ReshapeWeights", { true })),
272 framework::dataset::make("DataType", DataType::QASYMM8)),
Jenkins52ba29e2018-08-29 15:32:11 +0000273 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Jenkinsb3a371b2018-05-23 11:36:53 +0100274 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
275 QuantizedActivationFunctionsDataset))
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000276{
277 // Validate output
278 validate(CLAccessor(_target), _reference, tolerance_qasymm8);
279}
Jenkins52ba29e2018-08-29 15:32:11 +0000280FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000281 framework::dataset::make("ReshapeWeights", { true })),
282 framework::dataset::make("DataType", DataType::QASYMM8)),
Jenkins52ba29e2018-08-29 15:32:11 +0000283 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Jenkinsb3a371b2018-05-23 11:36:53 +0100284 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
285 QuantizedActivationFunctionsDataset))
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000286{
287 // Validate output
288 validate(CLAccessor(_target), _reference, tolerance_qasymm8);
Kaizen8938bd32017-09-28 14:38:23 +0100289}
Jenkins52ba29e2018-08-29 15:32:11 +0000290TEST_SUITE_END() // QASYMM8
291TEST_SUITE_END() // Quantized
Kaizen8938bd32017-09-28 14:38:23 +0100292
Jenkins52ba29e2018-08-29 15:32:11 +0000293TEST_SUITE_END() // GEMMConvolutionLayer
294
/** Fixture running grouped CLGEMMConvolutionLayer against the reference implementation */
template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

/** Configures grouped CLGEMMConvolutionLayer over the small + large grouped datasets
 *  and checks that configuration does not alter the tensors' valid regions. */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                                           GroupedCNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Input channels must be evenly divisible among the groups.
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is calculated dividing the number of input channels of the input tensor by the number of input channels of the weights shape
    const int num_groups = input_shape[2] / weights_shape[2];

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    // NOTE(review): no padding assertion is currently performed here — TODO confirm intent.
}
338
TEST_SUITE(Float)
TEST_SUITE(FP32)

/** Grouped F32 precommit run: small grouped dataset, NCHW only (grouped NHWC not exercised). */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                                                                        framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                        framework::dataset::make("DataType", DataType::F32)),
                                                                                                                        framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                        ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

/** Grouped F32 nightly run: large grouped dataset, NCHW only. */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                                                                                                                      framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                      framework::dataset::make("DataType", DataType::F32)),
                                                                                                                      framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                      ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32
362
363TEST_SUITE(FP16)
364
365FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
366 framework::dataset::make("ReshapeWeights", { true })),
367 framework::dataset::make("DataType", DataType::F16)),
368 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
369 ActivationFunctionsDataset))
370{
371 // Validate output
372 validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
373}
374
375FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
376 framework::dataset::make("ReshapeWeights", { true })),
377 framework::dataset::make("DataType", DataType::F16)),
378 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
379 ActivationFunctionsDataset))
380{
381 // Validate output
382 validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
383}
384TEST_SUITE_END() // FP16
385TEST_SUITE_END() // Float
386
387TEST_SUITE_END() // GroupedGEMMConvolutionLayer
388TEST_SUITE_END() // CL
Kaizen8938bd32017-09-28 14:38:23 +0100389} // namespace validation
390} // namespace test
391} // namespace arm_compute