blob: 5f10b4b9bce408b08a7cb5c1d4aefb1c68056720 [file] [log] [blame]
Kaizen8938bd32017-09-28 14:38:23 +01001/*
Anthony Barbierf45d5a92018-01-24 16:23:15 +00002 * Copyright (c) 2017-2018 ARM Limited.
Kaizen8938bd32017-09-28 14:38:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000028#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
Kaizen8938bd32017-09-28 14:38:23 +010029#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/LargeConvolutionLayerDataset.h"
32#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000033#include "tests/datasets/TinyConvolutionLayerDataset.h"
Kaizen8938bd32017-09-28 14:38:23 +010034#include "tests/framework/Asserts.h"
35#include "tests/framework/Macros.h"
36#include "tests/framework/datasets/Datasets.h"
37#include "tests/validation/Validation.h"
38#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
39
40namespace arm_compute
41{
42namespace test
43{
44namespace validation
45{
namespace
{
// Tolerances used by the checks below. The relative tolerances bound the per-element
// error ratio; the absolute tolerances bound the raw difference.
constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.05f);                 /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */
// NOTE(review): tolerance_num is forwarded as the tolerance_number argument of validate();
// presumably the allowed fraction of out-of-tolerance elements — confirm against Validation.h.

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
});

/** Grouped CNN data types */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

/** Activation layer configurations exercised by the float test cases */
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace
77
TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

/** Checks that CLConvolutionLayer::get_convolution_method() selects the expected
 * algorithm (GEMM vs. WINOGRAD) for each zipped combination of input/weights/output
 * TensorInfo, convolution descriptor, GPU target, dilation and fast-math flag.
 */
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
                                                                                   framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),
                                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)
                                                                                         }),
                                                                                   framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                                                                                                             TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                                                                                                             TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
                                                                                         })),
                                                                                   framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                                                                                            TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
                                                                                         })),
                                                                                   framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                                                                                          PadStrideInfo(1, 2, 1, 1),
                                                                                                                          PadStrideInfo(1, 1, 0, 0),
                                                                                                                          PadStrideInfo(1, 1, 0, 0),
                                                                                                                          PadStrideInfo(2, 1, 0, 0),
                                                                                                                          PadStrideInfo(3, 2, 1, 0),
                                                                                                                          PadStrideInfo(1, 1, 2, 2),
                                                                                                                          PadStrideInfo(1, 1, 2, 2)
                                                                                         })),
                                                                                   framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                                                                                                           GPUTarget::MIDGARD,
                                                                                                                           GPUTarget::G71,
                                                                                                                           GPUTarget::G71,
                                                                                                                           GPUTarget::MIDGARD,
                                                                                                                           GPUTarget::BIFROST,
                                                                                                                           GPUTarget::BIFROST,
                                                                                                                           GPUTarget::BIFROST
                                                                                         })),
                                                                                   framework::dataset::make("Dilation",
{
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(1U, 1U),
    Size2D(2U, 1U),
})),
framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
framework::dataset::make("Expected",
{
    ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM,
})),
input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    // Query the method the runtime would pick; clones are marked resizable so the
    // original immutable TensorInfo objects in the dataset are not modified.
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
156
TEST_SUITE(GEMMConvolutionLayer)

/** Configures CLGEMMConvolutionLayer over small + large shapes for every CNN data
 * type / activation, then checks the tensors' valid regions and that configure()
 * does not alter the tensors' quantization info.
 */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                                           CNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Quantized (QASYMM8) convolutions accumulate into S32 biases
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Snapshot quantization info before configure() to verify it is preserved
    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}
202
/** Fixture running CLGEMMConvolutionLayer and validating against the reference implementation */
template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP16)

/** FP16 precommit run over small shapes, NCHW and NHWC layouts */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                                 framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                 framework::dataset::make("DataType",
                                                                                                                                                          DataType::F16)),
                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                 ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

/** FP16 nightly run over large shapes, NCHW and NHWC layouts */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                               framework::dataset::make("DataType",
                                                                                                                                                        DataType::F16)),
                                                                                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16
Kaizen8938bd32017-09-28 14:38:23 +0100231
TEST_SUITE(FP32)

/** FP32 precommit run over small shapes, NCHW and NHWC layouts */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                                  framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                  framework::dataset::make("DataType",
                                                                                                                                                           DataType::F32)),
                                                                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                  ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

/** FP32 nightly run over large shapes; also applies the secondary absolute tolerance */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                                framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                framework::dataset::make("DataType",
                                                                                                                                                         DataType::F32)),
                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000257
/** Fixture running CLGEMMConvolutionLayer on quantized types and validating against the reference implementation */
template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

/** Activation layer configurations exercised by the quantized test cases */
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)

/** Quantization (scale, offset) combinations used for the QASYMM8 runs */
const auto QuantizationData = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(0.5f, 10),
    QuantizationInfo(0.3f, 3),
    QuantizationInfo(1.f, 10),
});

/** QASYMM8 precommit run over small shapes, NCHW and NHWC layouts */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                             framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                             framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                             framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                             QuantizationData),
                                                                                                                             QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}

/** QASYMM8 nightly run over large shapes, NCHW and NHWC layouts */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                           framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                           framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                           QuantizationData),
                                                                                                                           QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized
Kaizen8938bd32017-09-28 14:38:23 +0100300
TEST_SUITE_END() // GEMMConvolutionLayer

/** Fixture running grouped convolutions through CLGEMMConvolutionLayer */
template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

/** Configures a grouped CLGEMMConvolutionLayer over small + large grouped shapes and
 * checks the tensors' valid regions after configuration.
 */
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                                           GroupedCNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Input channels must be an exact multiple of the per-group weight channels
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is calculated dividing the number of input channels of the input tensor by the number of input channels of the weights shape
    const int num_groups = input_shape[2] / weights_shape[2];

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}
347
TEST_SUITE(Float)
TEST_SUITE(FP32)

/** Grouped FP32 precommit run over small grouped shapes (NCHW only) */
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                                                                                 framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                 framework::dataset::make("DataType", DataType::F32)),
                                                                                                                         framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                         ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

/** Grouped FP32 nightly run over large grouped shapes (NCHW only) */
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                                                                                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                               framework::dataset::make("DataType", DataType::F32)),
                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32
371
372TEST_SUITE(FP16)
373
374FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
375 framework::dataset::make("ReshapeWeights", { true })),
376 framework::dataset::make("DataType", DataType::F16)),
377 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
378 ActivationFunctionsDataset))
379{
380 // Validate output
381 validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
382}
383
384FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
385 framework::dataset::make("ReshapeWeights", { true })),
386 framework::dataset::make("DataType", DataType::F16)),
387 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
388 ActivationFunctionsDataset))
389{
390 // Validate output
391 validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
392}
393TEST_SUITE_END() // FP16
394TEST_SUITE_END() // Float
395
396TEST_SUITE_END() // GroupedGEMMConvolutionLayer
397TEST_SUITE_END() // CL
Kaizen8938bd32017-09-28 14:38:23 +0100398} // namespace validation
399} // namespace test
400} // namespace arm_compute