arm_compute v20.02
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index ae23f7e..cfc5085 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -40,26 +40,35 @@
     SimpleTensor<Tout> dst{ src.shape(), output_data_type, 1, quantization_info };
 
     const UniformQuantizationInfo qinfo = quantization_info.uniform();
+
+#ifdef __aarch64__
+    constexpr auto rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
+#else // __aarch64__
+    constexpr auto rounding_policy = RoundingPolicy::TO_ZERO;
+#endif // __aarch64__
+
     switch(output_data_type)
     {
         case DataType::QASYMM8:
             for(int i = 0; i < src.num_elements(); ++i)
             {
+                dst[i] = quantize_qasymm8((src[i]), qinfo, rounding_policy);
+            }
+            break;
+        case DataType::QASYMM8_SIGNED:
+            for(int i = 0; i < src.num_elements(); ++i)
+            {
 #ifdef __aarch64__
-                dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
+                dst[i] = quantize_qasymm8_signed((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
 #else // __aarch64__
-                dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_ZERO);
+                dst[i] = quantize_qasymm8_signed((src[i]), qinfo, RoundingPolicy::TO_ZERO);
 #endif // __aarch64__
             }
             break;
         case DataType::QASYMM16:
             for(int i = 0; i < src.num_elements(); ++i)
             {
-#ifdef __aarch64__
-                dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
-#else // __aarch64__
-                dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_ZERO);
-#endif // __aarch64__
+                dst[i] = quantize_qasymm16((src[i]), qinfo, rounding_policy);
             }
             break;
         default:
@@ -68,6 +77,43 @@
     return dst;
 }
 
+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint16_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
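
The behavioural difference introduced by rounding_policy can be reproduced outside the library. The sketch below is not arm_compute code: it assumes the usual affine quantization formula q = clamp(round(x / scale) + offset, min, max), and the helper names (quantize_s8_example, round_nearest_even, round_to_zero) are illustrative only. It shows how RoundingPolicy::TO_NEAREST_EVEN (used on aarch64) and RoundingPolicy::TO_ZERO (other targets) diverge on a tie value such as 1.5.

// Standalone illustration (not arm_compute API): affine quantization with the
// two rounding behaviours selected by the patch.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

static int32_t round_nearest_even(float x)
{
    // std::nearbyint honours the current FP rounding mode, FE_TONEAREST by
    // default, which rounds ties to the nearest even integer.
    return static_cast<int32_t>(std::nearbyint(x));
}

static int32_t round_to_zero(float x)
{
    // Truncation simply drops the fractional part.
    return static_cast<int32_t>(std::trunc(x));
}

// q = clamp(round(x / scale) + offset, -128, 127) for a signed 8-bit output.
static int8_t quantize_s8_example(float x, float scale, int32_t offset, bool nearest_even)
{
    const float   scaled  = x / scale;
    const int32_t rounded = nearest_even ? round_nearest_even(scaled) : round_to_zero(scaled);
    return static_cast<int8_t>(std::clamp(rounded + offset, -128, 127));
}

int main()
{
    // 0.75f / 0.5f = 1.5f sits exactly on a tie:
    // ties-to-even gives 2, truncation toward zero gives 1.
    std::cout << int(quantize_s8_example(0.75f, 0.5f, 0, true)) << '\n';  // prints 2
    std::cout << int(quantize_s8_example(0.75f, 0.5f, 0, false)) << '\n'; // prints 1
}

Round-to-nearest-even is the default IEEE-754 behaviour of the aarch64 convert instructions, which is presumably why the reference selects TO_NEAREST_EVEN on that target so the expected outputs line up with the NEON kernels.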