arm_compute v18.01
Change-Id: I9bfa178c2e38bfd5fc812e62aab6760d87748e05
diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 17eaaf8..e8c7715 100644
--- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,22 +42,24 @@
{
namespace
{
-constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QASYMM8 */
} // namespace
TEST_SUITE(NEON)
-TEST_SUITE(DepthwiseConvolutionLayer)
+TEST_SUITE(DepthwiseConvLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
datasets::LargeDepthwiseConvolutionLayerDataset3x3()),
framework::dataset::make("DataType", DataType::F32)),
- input_shape, weights_shape, bias_shape, output_shape, info, data_type)
+ input_shape, weights_shape, output_shape, info, data_type)
{
// Create tensors
- Tensor src = create_tensor<Tensor>(input_shape, data_type);
- Tensor dst = create_tensor<Tensor>(output_shape, data_type);
- Tensor weights = create_tensor<Tensor>(weights_shape, data_type);
- Tensor bias = create_tensor<Tensor>(bias_shape, data_type);
+ Tensor src = create_tensor<Tensor>(input_shape, data_type);
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type);
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type);
+ const TensorShape bias_shape(weights_shape[2]);
+ Tensor bias = create_tensor<Tensor>(bias_shape, data_type);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -120,8 +122,30 @@
validate(Accessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END()
+TEST_SUITE_END()
TEST_SUITE_END()
+
+template <typename T>
+using NEDepthwiseConvolutionLayerQuantizedFixture3x3 = DepthwiseConvolutionLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer3x3, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+TEST_SUITE(W3x3)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
TEST_SUITE_END()
TEST_SUITE_END()
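
For context, a minimal usage sketch of the QASYMM8 path that the new NEDepthwiseConvolutionLayerQuantizedFixture3x3 cases exercise, driven directly through the public NEON runtime API rather than the test framework. The shapes, the QuantizationInfo(0.5f, 10) pair and the pad/stride values below are illustrative choices in the spirit of the 3x3 datasets referenced above, not anything mandated by this patch, and main() is only a hypothetical harness; the configure() call is written against the (input, weights, biases, output, conv_info) signature shipped in this release, with an S32 bias tensor as quantized convolutions expect.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative shapes: 11x11 input with 16 channels, 3x3 depthwise kernel,
        // stride 1 and padding 1 so the spatial size is preserved.
        const QuantizationInfo qinfo(0.5f, 10); // same scale/offset pair as the test dataset

        TensorInfo src_info(TensorShape(11U, 11U, 16U), 1, DataType::QASYMM8);
        TensorInfo wei_info(TensorShape(3U, 3U, 16U), 1, DataType::QASYMM8);
        TensorInfo bia_info(TensorShape(16U), 1, DataType::S32); // quantized bias is S32
        TensorInfo dst_info(TensorShape(11U, 11U, 16U), 1, DataType::QASYMM8);
        src_info.set_quantization_info(qinfo);
        wei_info.set_quantization_info(qinfo);
        dst_info.set_quantization_info(qinfo);

        Tensor src, weights, bias, dst;
        src.allocator()->init(src_info);
        weights.allocator()->init(wei_info);
        bias.allocator()->init(bia_info);
        dst.allocator()->init(dst_info);

        // Configure before allocating, as with the other NEON functions.
        NEDepthwiseConvolutionLayer3x3 dwc;
        dwc.configure(&src, &weights, &bias, &dst,
                      PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src/weights/bias with quantized data here ...

        dwc.run();
        return 0;
    }

As the fixtures above show, the reference comparison for this path uses tolerance_qasymm8, i.e. an absolute difference of at most 1 in the 8-bit output, which absorbs rounding differences introduced by requantization.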