blob: ec729b3ade5104015f43a1d000e9472300d4a202 [file] [log] [blame]
/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000028#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
Kaizen8938bd32017-09-28 14:38:23 +010029#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/LargeConvolutionLayerDataset.h"
32#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier06ea0482018-02-22 15:45:35 +000033#include "tests/datasets/TinyConvolutionLayerDataset.h"
Kaizen8938bd32017-09-28 14:38:23 +010034#include "tests/framework/Asserts.h"
35#include "tests/framework/Macros.h"
36#include "tests/framework/datasets/Datasets.h"
37#include "tests/validation/Validation.h"
38#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
39
40namespace arm_compute
41{
42namespace test
43{
44namespace validation
45{
46namespace
47{
Jenkinsb3a371b2018-05-23 11:36:53 +010048constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
Kaizen8938bd32017-09-28 14:38:23 +010049RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
50RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
Anthony Barbier8140e1e2017-12-14 23:48:46 +000051constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
52constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
Kaizen8938bd32017-09-28 14:38:23 +010053constexpr float tolerance_num = 0.07f; /**< Tolerance number */
54
55/** CNN data types */
56const auto CNNDataTypes = framework::dataset::make("DataType",
57{
58 DataType::F16,
59 DataType::F32,
60 DataType::QS8,
61 DataType::QS16,
Anthony Barbier8140e1e2017-12-14 23:48:46 +000062 DataType::QASYMM8,
Kaizen8938bd32017-09-28 14:38:23 +010063});
Jenkinsb3a371b2018-05-23 11:36:53 +010064const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
65{
66 ActivationLayerInfo(),
67 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
68 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
69 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
70});
Kaizen8938bd32017-09-28 14:38:23 +010071} // namespace
72
73TEST_SUITE(CL)
74TEST_SUITE(ConvolutionLayer)
75
Jenkinsb3a371b2018-05-23 11:36:53 +010076DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
77 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
78 TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
79 TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0),
80 TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32, 0),
81 TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
82 TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0),
83 TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32, 0),
84 TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0)
85 }),
86 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
87 TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
88 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
89 TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32, 0),
90 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
91 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0),
92 TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32, 0),
93 TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0)
94 })),
95 framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
96 TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
97 TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
98 TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
99 TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
100 TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0),
101 TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0),
102 TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0)
103 })),
104 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
105 PadStrideInfo(1, 2, 1, 1),
106 PadStrideInfo(1, 1, 0, 0),
107 PadStrideInfo(1, 1, 0, 0),
108 PadStrideInfo(2, 1, 0, 0),
109 PadStrideInfo(3, 2, 1, 0),
110 PadStrideInfo(1, 1, 2, 2),
111 PadStrideInfo(1, 1, 2, 2)
112 })),
113 framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
114 GPUTarget::MIDGARD,
115 GPUTarget::G71,
116 GPUTarget::G71,
117 GPUTarget::MIDGARD,
118 GPUTarget::BIFROST,
119 GPUTarget::BIFROST,
120 GPUTarget::BIFROST
121 })),
122 framework::dataset::make("Dilation",
Anthony Barbier06ea0482018-02-22 15:45:35 +0000123{
Jenkinsb3a371b2018-05-23 11:36:53 +0100124 Size2D(1U, 1U),
125 Size2D(1U, 1U),
126 Size2D(1U, 1U),
127 Size2D(1U, 1U),
128 Size2D(1U, 1U),
129 Size2D(1U, 1U),
130 Size2D(1U, 1U),
131 Size2D(2U, 1U),
132})),
133framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
134framework::dataset::make("Expected",
135{
136 ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM,
137})),
138input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
139{
140 ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
141 &weights_info.clone()->set_is_resizable(true),
142 &output_info.clone()->set_is_resizable(true), conv_info,
143 WeightsInfo(),
144 ActivationLayerInfo(),
145 gpu_target,
146 dilation,
147 enable_fast_math);
Anthony Barbier06ea0482018-02-22 15:45:35 +0000148 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
149}
150TEST_SUITE_END()
151
152TEST_SUITE(GEMMConvolutionLayer)
153
Jenkinsb3a371b2018-05-23 11:36:53 +0100154DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
155 CNNDataTypes),
156 ActivationFunctionsDataset),
157 input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
Kaizen8938bd32017-09-28 14:38:23 +0100158{
159 // Set fixed point position data type allowed
160 int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
161
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000162 auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
163
Kaizen8938bd32017-09-28 14:38:23 +0100164 // Create tensors
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000165 CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
166 CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
167 CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
168 CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
Kaizen8938bd32017-09-28 14:38:23 +0100169
170 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
171 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
172 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
173 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
174
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000175 const QuantizationInfo src_quantization_info = src.info()->quantization_info();
176 const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
177
Kaizen8938bd32017-09-28 14:38:23 +0100178 // Create and configure function
Anthony Barbier06ea0482018-02-22 15:45:35 +0000179 CLGEMMConvolutionLayer conv;
Jenkinsb3a371b2018-05-23 11:36:53 +0100180 conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
Kaizen8938bd32017-09-28 14:38:23 +0100181
182 // Validate valid region
183 const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
184 const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
185 const ValidRegion bias_valid_region = shape_to_valid_region(bias_shape);
186 const ValidRegion dst_valid_region = shape_to_valid_region(output_shape);
187
188 validate(src.info()->valid_region(), src_valid_region);
189 validate(weights.info()->valid_region(), weights_valid_region);
190 validate(bias.info()->valid_region(), bias_valid_region);
191 validate(dst.info()->valid_region(), dst_valid_region);
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000192
193 // Validate QuantizationInfo
194 ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
195 ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
Jenkinsb3a371b2018-05-23 11:36:53 +0100196
197 // Validate padding
Kaizen8938bd32017-09-28 14:38:23 +0100198}
199
200template <typename T>
Anthony Barbier06ea0482018-02-22 15:45:35 +0000201using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Kaizen8938bd32017-09-28 14:38:23 +0100202
203TEST_SUITE(Float)
204TEST_SUITE(FP16)
Jenkinsb3a371b2018-05-23 11:36:53 +0100205
206FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000207 framework::dataset::make("ReshapeWeights", { true })),
208 framework::dataset::make("DataType",
Jenkinsb3a371b2018-05-23 11:36:53 +0100209 DataType::F16)),
210 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
211 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100212{
213 // Validate output
214 validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
215}
Jenkinsb3a371b2018-05-23 11:36:53 +0100216
217FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000218 framework::dataset::make("ReshapeWeights", { true })),
Jenkinsb3a371b2018-05-23 11:36:53 +0100219 framework::dataset::make("DataType",
220 DataType::F16)),
221 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
222 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100223{
224 // Validate output
225 validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
226}
227TEST_SUITE_END()
228
229TEST_SUITE(FP32)
Jenkinsb3a371b2018-05-23 11:36:53 +0100230
231FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000232 framework::dataset::make("ReshapeWeights", { true })),
233 framework::dataset::make("DataType",
Jenkinsb3a371b2018-05-23 11:36:53 +0100234 DataType::F32)),
235 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
236 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100237{
238 // Validate output
239 validate(CLAccessor(_target), _reference, tolerance_f32);
240}
Jenkinsb3a371b2018-05-23 11:36:53 +0100241
242FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000243 framework::dataset::make("ReshapeWeights", { true })),
Jenkinsb3a371b2018-05-23 11:36:53 +0100244 framework::dataset::make("DataType",
245 DataType::F32)),
246 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
247 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100248{
249 // Validate output
Jenkinsb3a371b2018-05-23 11:36:53 +0100250 validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
Kaizen8938bd32017-09-28 14:38:23 +0100251}
252TEST_SUITE_END()
253TEST_SUITE_END()
254
255template <typename T>
Anthony Barbier06ea0482018-02-22 15:45:35 +0000256using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Kaizen8938bd32017-09-28 14:38:23 +0100257
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000258TEST_SUITE(FixedPoint)
Kaizen8938bd32017-09-28 14:38:23 +0100259TEST_SUITE(QS8)
260// We test for fixed point precision [4,6]
Jenkinsb3a371b2018-05-23 11:36:53 +0100261FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
Anthony Barbierf45d5a92018-01-24 16:23:15 +0000262 framework::dataset::make("ReshapeWeights", { true })),
Kaizen8938bd32017-09-28 14:38:23 +0100263 framework::dataset::make("DataType",
264 DataType::QS8)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100265 framework::dataset::make("FractionalBits", 4, 7)),
266 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100267{
268 // Validate output
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000269 validate(CLAccessor(_target), _reference, tolerance_fixed);
Kaizen8938bd32017-09-28 14:38:23 +0100270}
Jenkinsb3a371b2018-05-23 11:36:53 +0100271
272FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000273 framework::dataset::make("ReshapeWeights", { true })),
274 framework::dataset::make("DataType",
275 DataType::QS8)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100276 framework::dataset::make("FractionalBits", 4, 7)),
277 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100278{
279 // Validate output
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000280 validate(CLAccessor(_target), _reference, tolerance_fixed);
Kaizen8938bd32017-09-28 14:38:23 +0100281}
282TEST_SUITE_END()
283
284TEST_SUITE(QS16)
285// Testing for fixed point position [1,14)
Jenkinsb3a371b2018-05-23 11:36:53 +0100286FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
Anthony Barbierf45d5a92018-01-24 16:23:15 +0000287 framework::dataset::make("ReshapeWeights", { true })),
Kaizen8938bd32017-09-28 14:38:23 +0100288 framework::dataset::make("DataType",
289 DataType::QS16)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100290 framework::dataset::make("FractionalBits", 1, 14)),
291 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100292{
293 // Validate output
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000294 validate(CLAccessor(_target), _reference, tolerance_fixed);
Kaizen8938bd32017-09-28 14:38:23 +0100295}
Jenkinsb3a371b2018-05-23 11:36:53 +0100296
297FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000298 framework::dataset::make("ReshapeWeights", { true })),
299 framework::dataset::make("DataType",
300 DataType::QS16)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100301 framework::dataset::make("FractionalBits", 1, 14)),
302 ActivationFunctionsDataset))
Kaizen8938bd32017-09-28 14:38:23 +0100303{
304 // Validate output
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000305 validate(CLAccessor(_target), _reference, tolerance_fixed);
306}
307TEST_SUITE_END()
308TEST_SUITE_END()
309
310template <typename T>
Anthony Barbier06ea0482018-02-22 15:45:35 +0000311using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000312
Jenkinsb3a371b2018-05-23 11:36:53 +0100313const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
314{
315 ActivationLayerInfo(),
316 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
317 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
318});
319
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000320TEST_SUITE(Quantized)
321TEST_SUITE(QASYMM8)
Jenkinsb3a371b2018-05-23 11:36:53 +0100322
323FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000324 framework::dataset::make("ReshapeWeights", { true })),
325 framework::dataset::make("DataType", DataType::QASYMM8)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100326 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
327 QuantizedActivationFunctionsDataset))
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000328{
329 // Validate output
330 validate(CLAccessor(_target), _reference, tolerance_qasymm8);
331}
Jenkinsb3a371b2018-05-23 11:36:53 +0100332FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
Anthony Barbier06ea0482018-02-22 15:45:35 +0000333 framework::dataset::make("ReshapeWeights", { true })),
334 framework::dataset::make("DataType", DataType::QASYMM8)),
Jenkinsb3a371b2018-05-23 11:36:53 +0100335 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
336 QuantizedActivationFunctionsDataset))
Anthony Barbier8140e1e2017-12-14 23:48:46 +0000337{
338 // Validate output
339 validate(CLAccessor(_target), _reference, tolerance_qasymm8);
Kaizen8938bd32017-09-28 14:38:23 +0100340}
341TEST_SUITE_END()
342TEST_SUITE_END()
343
344TEST_SUITE_END()
345TEST_SUITE_END()
346} // namespace validation
347} // namespace test
348} // namespace arm_compute