arm_compute v18.01

Change-Id: I9bfa178c2e38bfd5fc812e62aab6760d87748e05
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index afa5d97..c26c99a 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -34,7 +34,7 @@
 using namespace arm_compute;
 
 NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _accumulate_bias_kernel(), _conv_kernel(), _input_border_handler(), _accumulator(), _has_bias(false)
+    : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _accumulator(), _has_bias(false), _is_fixed_point(false)
 {
 }
 
@@ -50,16 +50,16 @@
     _has_bias = (bias != nullptr);
 
     // Allocate the intermediate accumulator tensor in case of fixed point input
-    if(is_data_type_fixed_point(input->info()->data_type()))
+    _is_fixed_point = is_data_type_fixed_point(input->info()->data_type());
+    if(_is_fixed_point)
     {
         const DataType promoted_dt = (input->info()->data_type() == DataType::QS8) ? DataType::QS16 : DataType::QS32;
         _accumulator.allocator()->init(TensorInfo(output->info()->tensor_shape(), 1, promoted_dt, output->info()->fixed_point_position()));
         _memory_group.manage(&_accumulator);
         _conv_kernel.configure(input, weights, &_accumulator, conv_info);
-        if(_has_bias)
-        {
-            _accumulate_bias_kernel.configure(&_accumulator, bias, output);
-        }
+
+        // Even when no bias is provided, the output stage kernel is still needed to downscale the accumulator tensor
+        _output_stage_kernel.configure(&_accumulator, bias, output);
         _accumulator.allocator()->allocate();
     }
     else
@@ -67,7 +67,7 @@
         _conv_kernel.configure(input, weights, output, conv_info);
         if(_has_bias)
         {
-            _accumulate_bias_kernel.configure(output, bias);
+            _output_stage_kernel.configure(output, bias);
         }
     }
 
@@ -90,20 +90,17 @@
     // Validate Convolution kernel
     ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerKernel::validate(input, weights, &accumulator, conv_info));
 
-    // Validate bias
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias == nullptr) && is_data_type_fixed_point(data_type),
-                                    "Biases should be provided for fixed point inputs");
     if(bias != nullptr)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
                                         "Biases size and number of input feature maps should match");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
-
-        // Validate bias kernel
-        ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerBiasAccumulateKernel::validate(&accumulator, bias, output));
     }
 
+    // Validate bias kernel
+    ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, bias, output));
+
     return Status{};
 }
 
@@ -114,10 +111,9 @@
     _memory_group.acquire();
 
     NEScheduler::get().schedule(&_conv_kernel, Window::DimZ);
-    if(_has_bias)
+    if(_has_bias || _is_fixed_point)
     {
-        NEScheduler::get().schedule(&_accumulate_bias_kernel, Window::DimY);
+        NEScheduler::get().schedule(&_output_stage_kernel, Window::DimY);
     }
-
     _memory_group.release();
 }
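
Below is a minimal usage sketch (not part of the patch) of the behaviour this change enables: a QS8 direct convolution configured without a bias, where the new output stage kernel is still run to downscale the intermediate accumulator. Tensor shapes, the fixed point position and the pad/stride values are illustrative assumptions, not taken from the library's tests.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative QS8 tensors: 8x8 single-channel input, one 3x3 kernel, no padding -> 6x6 output
    Tensor src, weights, dst;
    const int fixed_point_position = 3; // illustrative value
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 1U), 1, DataType::QS8, fixed_point_position));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 1U, 1U), 1, DataType::QS8, fixed_point_position));
    dst.allocator()->init(TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::QS8, fixed_point_position));

    const PadStrideInfo conv_info(1, 1, 0, 0); // stride 1, no padding

    // Validation no longer rejects a missing bias for fixed point inputs
    const Status status = NEDirectConvolutionLayer::validate(src.info(), weights.info(), nullptr, dst.info(), conv_info);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    // Configure and run: the internal output stage kernel downscales the QS16 accumulator into dst
    NEDirectConvolutionLayer conv;
    conv.configure(&src, &weights, nullptr /* no bias */, &dst, conv_info);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src and weights with QS8 data ...

    conv.run();
    return 0;
}

With the previous code, validate() rejected a null bias for fixed point inputs ("Biases should be provided for fixed point inputs") and run() only scheduled the bias accumulation kernel when a bias was present, so nothing downscaled the accumulator in the bias-less case.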