arm_compute v17.04
diff --git a/src/runtime/CL/functions/CLCannyEdge.cpp b/src/runtime/CL/functions/CLCannyEdge.cpp
index 20b8dd8..1d018b8 100644
--- a/src/runtime/CL/functions/CLCannyEdge.cpp
+++ b/src/runtime/CL/functions/CLCannyEdge.cpp
@@ -36,7 +36,7 @@
 using namespace arm_compute;
 
 CLCannyEdge::CLCannyEdge()
-    : _sobel(nullptr), _gradient(), _non_max_suppr(), _edge_trace(), _gx(), _gy(), _mag(), _phase(), _nonmax(), _visited(), _recorded(), _l1_list_counter(), _l1_stack()
+    : _sobel(nullptr), _gradient(), _border_mag_gradient(), _non_max_suppr(), _edge_trace(), _gx(), _gy(), _mag(), _phase(), _nonmax(), _visited(), _recorded(), _l1_list_counter(), _l1_stack()
 {
 }
 
@@ -48,7 +48,6 @@
     ARM_COMPUTE_ERROR_ON(lower_thr > upper_thr);
 
     const unsigned int L1_hysteresis_stack_size = 8;
-    int32_t            num_pixel_to_skip        = (border_mode == BorderMode::UNDEFINED) ? gradient_size / 2 : 0;
     const TensorShape  shape                    = input->info()->tensor_shape();
 
     TensorInfo gradient_info;
@@ -57,44 +56,32 @@
     // Initialize images
     if(gradient_size < 7)
     {
-        gradient_info.init_auto_padding(shape, 1, arm_compute::DataType::S16);
-        info.init_auto_padding(shape, 1, arm_compute::DataType::U16);
+        gradient_info.init(shape, 1, arm_compute::DataType::S16);
+        info.init(shape, 1, arm_compute::DataType::U16);
     }
     else
     {
-        gradient_info.init_auto_padding(shape, 1, arm_compute::DataType::S32);
-        info.init_auto_padding(shape, 1, arm_compute::DataType::U32);
+        gradient_info.init(shape, 1, arm_compute::DataType::S32);
+        info.init(shape, 1, arm_compute::DataType::U32);
     }
 
     _gx.allocator()->init(gradient_info);
-    _gx.allocator()->allocate();
     _gy.allocator()->init(gradient_info);
-    _gy.allocator()->allocate();
     _mag.allocator()->init(info);
-    _mag.allocator()->allocate();
     _nonmax.allocator()->init(info);
-    _nonmax.allocator()->allocate();
 
     TensorInfo info_u8(shape, 1, arm_compute::DataType::U8);
-    info_u8.auto_padding();
     _phase.allocator()->init(info_u8);
-    _phase.allocator()->allocate();
     _l1_list_counter.allocator()->init(info_u8);
-    _l1_list_counter.allocator()->allocate();
 
     TensorInfo info_u32(shape, 1, arm_compute::DataType::U32);
-    info_u32.auto_padding();
     _visited.allocator()->init(info_u32);
-    _visited.allocator()->allocate();
     _recorded.allocator()->init(info_u32);
-    _recorded.allocator()->allocate();
 
     TensorShape shape_l1_stack = input->info()->tensor_shape();
     shape_l1_stack.set(0, input->info()->dimension(0) * L1_hysteresis_stack_size);
     TensorInfo info_s32(shape_l1_stack, 1, arm_compute::DataType::S32);
-    info_s32.auto_padding();
     _l1_stack.allocator()->init(info_s32);
-    _l1_stack.allocator()->allocate();
 
     // Configure/Init sobelNxN
     if(gradient_size == 3)
@@ -121,15 +108,27 @@
     }
 
     // Configure gradient
-    _gradient.configure(&_gx, &_gy, &_mag, &_phase, norm_type, num_pixel_to_skip, border_mode == BorderMode::UNDEFINED);
+    _gradient.configure(&_gx, &_gy, &_mag, &_phase, norm_type);
 
     // Configure non-maxima suppression
-    _non_max_suppr.configure(&_mag, &_phase, &_nonmax, lower_thr, num_pixel_to_skip, border_mode == BorderMode::UNDEFINED);
+    _non_max_suppr.configure(&_mag, &_phase, &_nonmax, lower_thr, border_mode == BorderMode::UNDEFINED);
+
+    // Fill border around magnitude image as non-maxima suppression will access
+    // it. If border mode is undefined filling the border is a nop.
+    _border_mag_gradient.configure(&_mag, _non_max_suppr.border_size(), border_mode, constant_border_value);
 
     // Configure edge tracing
-    num_pixel_to_skip += 1;
-    _edge_trace.configure(&_nonmax, output, upper_thr, lower_thr,
-                          &_visited, &_recorded, &_l1_stack, &_l1_list_counter, num_pixel_to_skip, border_mode == BorderMode::UNDEFINED);
+    _edge_trace.configure(&_nonmax, output, upper_thr, lower_thr, &_visited, &_recorded, &_l1_stack, &_l1_list_counter);
+
+    _gx.allocator()->allocate();
+    _gy.allocator()->allocate();
+    _phase.allocator()->allocate();
+    _mag.allocator()->allocate();
+    _visited.allocator()->allocate();
+    _recorded.allocator()->allocate();
+    _l1_stack.allocator()->allocate();
+    _l1_list_counter.allocator()->allocate();
+    _nonmax.allocator()->allocate();
 }
 
 void CLCannyEdge::run()
@@ -140,6 +139,9 @@
     // Run phase and magnitude calculation
     CLScheduler::get().enqueue(_gradient, false);
 
+    // Fill border before non-maxima suppression. Nop for border mode undefined.
+    CLScheduler::get().enqueue(_border_mag_gradient, false);
+
     // Run non-maxima suppression
     _nonmax.clear(CLScheduler::get().queue());
     CLScheduler::get().enqueue(_non_max_suppr, false);
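
Note: this change follows the pattern used throughout v17.04: the intermediate tensors are initialised without auto-padding, every kernel that touches them is configured first, and only then are they allocated, with an explicit border fill (`_border_mag_gradient`) added because non-maxima suppression reads outside the valid region. A minimal, self-contained sketch of why that ordering matters is below; `ToyTensor` and `configure_kernel` are hypothetical stand-ins, not arm_compute types, modelling a kernel that grows a tensor's padding requirement when it is configured.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Toy stand-in for a tensor whose buffer is sized from its element count plus
    // whatever border padding the kernels configured on it have asked for.
    struct ToyTensor
    {
        std::size_t width;
        std::size_t height;
        std::size_t padding;
        std::size_t allocated;

        void allocate()
        {
            // Padding has to be final here; allocating earlier would under-size the buffer.
            allocated = (width + 2 * padding) * (height + 2 * padding);
        }
    };

    // Toy stand-in for kernel configuration: a kernel may extend the padding
    // requirement of the tensors it accesses (e.g. to read a border region).
    void configure_kernel(ToyTensor &t, std::size_t border_size)
    {
        t.padding = std::max(t.padding, border_size);
    }

    int main()
    {
        ToyTensor mag{ 64, 64, 0, 0 };

        configure_kernel(mag, 1); // gradient-style kernel: 1-pixel border
        configure_kernel(mag, 2); // non-maxima-suppression-style kernel: 2-pixel border

        mag.allocate();           // allocate only after every user of `mag` is configured
        std::printf("allocated elements: %zu\n", mag.allocated); // sized for the larger border
        return 0;
    }

The same configure-first, allocate-last reordering appears in the CLGaussianPyramid, CLLaplacianPyramid, CLLaplacianReconstruct and CLOpticalFlow changes further down.
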
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 4788999..bb47bf9 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -77,9 +77,9 @@
     // Get convolved dimensions
     unsigned int conv_w = 0;
     unsigned int conv_h = 0;
-
     std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0),
                                                  stride_x, stride_y, pad_x, pad_y, conv_info.round());
+    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
 
     // Create tensor to store the reshaped weights
     const size_t      mat_weights_cols = weights->info()->dimension(3);
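
Note: the new assertion checks the output tensor against the width/height returned by `scaled_dimensions()`. As a reference point, a small standalone sketch of the usual convolution output-size arithmetic is below; it assumes the standard formula (in + 2*pad - kernel) / stride + 1 with floor or ceil rounding, and is an illustration rather than a copy of the library's implementation.

    #include <cmath>
    #include <cstdio>
    #include <utility>

    // Usual convolved output size: one window position per stride step that still fits
    // inside the padded input. FLOOR drops the last partial window, CEIL keeps it.
    std::pair<unsigned, unsigned> conv_output_dims(unsigned w, unsigned h, unsigned kernel,
                                                   unsigned stride_x, unsigned stride_y,
                                                   unsigned pad_x, unsigned pad_y, bool ceil_round)
    {
        const float wf    = static_cast<float>(w + 2 * pad_x - kernel) / stride_x + 1.f;
        const float hf    = static_cast<float>(h + 2 * pad_y - kernel) / stride_y + 1.f;
        const auto  round = [ceil_round](float v) {
            return static_cast<unsigned>(ceil_round ? std::ceil(v) : std::floor(v));
        };
        return { round(wf), round(hf) };
    }

    int main()
    {
        // e.g. 224x224 input, 7x7 kernel, stride 2, padding 3 -> 112x112 with FLOOR rounding
        const auto dims = conv_output_dims(224, 224, 7, 2, 2, 3, 3, false);
        std::printf("%ux%u\n", dims.first, dims.second);
        return 0;
    }
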
diff --git a/src/runtime/CL/functions/CLFastCorners.cpp b/src/runtime/CL/functions/CLFastCorners.cpp
index 329de46..d2903fb 100644
--- a/src/runtime/CL/functions/CLFastCorners.cpp
+++ b/src/runtime/CL/functions/CLFastCorners.cpp
@@ -60,7 +60,6 @@
     ARM_COMPUTE_ERROR_ON(threshold < 1 || threshold > 255);
 
     TensorInfo tensor_info(input->info()->tensor_shape(), 1, DataType::U8);
-
     _output.allocator()->init(tensor_info);
 
     _non_max               = nonmax_suppression;
@@ -83,9 +82,11 @@
 
         _suppr_func.configure(&_output, &_suppr, border_mode);
         _copy_array_kernel.configure(&_suppr, update_number, corners, &_num_buffer);
+
         _suppr.allocator()->allocate();
     }
 
+    // Allocate intermediate tensors
     _output.allocator()->allocate();
 }
 
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 2ca72c5..08e18df 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -26,68 +26,236 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
+#include <algorithm>
+#include <cmath>
+
 using namespace arm_compute;
 
 CLFullyConnectedLayer::CLFullyConnectedLayer()
-    : _conv_function(), _gemm_function(), _transpose_kernel(), _acc_biases_kernel(), _run_func(), _weights_transpose(), _is_first_run(true), _run_acc_biases(false)
+    : _im2col_kernel(), _transpose_kernel(), _transpose1xW_kernel(), _interleave4x4_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _interleave4x4_output(), _transpose_output(),
+      _transpose1xW_output(), _is_first_run(true), _transpose_weights(true), _fc_after_conv(true), _batched_fc_layer(false), _accumulate_biases(false)
 {
 }
 
-void CLFullyConnectedLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *biases, ICLTensor *output)
+void CLFullyConnectedLayer::configure_conv_fc_wb(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON((weights->info()->num_dimensions() != 2) && (weights->info()->num_dimensions() != 4));
+    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2)));
 
-    // Make sure that in the fully connected layer connected to fully connected layer case, the first dimension of the weights and input are same.
-    ARM_COMPUTE_ERROR_ON((weights->info()->num_dimensions() == 2) && (input->info()->dimension(0) != weights->info()->dimension(0)));
+    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
 
-    if(weights->info()->num_dimensions() != 2)
-    {
-        _conv_function.configure(input, weights, biases, output, PadStrideInfo(1, 1, 0, 0, DimensionRoundingType::FLOOR));
-        _run_func = &CLFullyConnectedLayer::run_conv;
-        return;
-    }
+    // Initialize output tensor for im2col
+    TensorShape shape_im2col;
+    shape_im2col.set(0, weights->info()->dimension(1));
+    shape_im2col.set(1, input->info()->dimension(3));
+    shape_im2col.set(2, input->info()->dimension(4));
+    shape_im2col.set(3, input->info()->dimension(5));
+    _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, input->info()->data_type()));
 
-    TensorShape shape_trans(weights->info()->dimension(1), weights->info()->dimension(0));
-    _weights_transpose.allocator()->init(TensorInfo(shape_trans, 1, weights->info()->data_type()));
+    // Initialize output tensor for interleave 4x4
+    TensorShape shape_interleaved = _im2col_output.info()->tensor_shape();
+    shape_interleaved.set(0, shape_interleaved.x() * 4);
+    shape_interleaved.set(1, std::ceil(static_cast<float>(shape_interleaved.y()) / 4));
+    _interleave4x4_output.allocator()->init(TensorInfo(shape_interleaved, 1, input->info()->data_type()));
 
-    // Configure kernels
-    _transpose_kernel.configure(weights, &_weights_transpose);
-    _gemm_function.configure(input, &_weights_transpose, nullptr, output, 1.0f, 0.0f);
+    // Initialize output tensor for transpose 1xW
+    TensorShape shape_transposed1xW(weights->info()->dimension(1) * 4, static_cast<size_t>(std::ceil(weights->info()->dimension(0) / 4.f)));
+    _transpose1xW_output.allocator()->init(TensorInfo(shape_transposed1xW, 1, weights->info()->data_type()));
+
+    // Configure im2col kernel
+    _im2col_kernel.configure(input, &_im2col_output, std::make_pair(1, 1), PadStrideInfo(1, 1, 0, 0), false);
+
+    // Configure interleave4x4 kernel
+    _interleave4x4_kernel.configure(&_im2col_output, &_interleave4x4_output);
+
+    // Configure transpose 1xW kernel
+    _transpose1xW_kernel.configure(weights, &_transpose1xW_output);
+
+    // Configure matrix multiply kernel
+    _mm_kernel.configure(&_interleave4x4_output, &_transpose1xW_output, output, 1.0f);
+
+    // Allocate the tensors once all the configure methods have been called
+    _im2col_output.allocator()->allocate();
+    _interleave4x4_output.allocator()->allocate();
+    _transpose1xW_output.allocator()->allocate();
+}
+
+void CLFullyConnectedLayer::configure_fc_fc_wb(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
+{
+    // Initialize output tensor for interleave 4x4
+    TensorShape shape_interleaved = input->info()->tensor_shape();
+    shape_interleaved.set(0, shape_interleaved.x() * 4);
+    shape_interleaved.set(1, std::ceil(static_cast<float>(shape_interleaved.y()) / 4));
+    _interleave4x4_output.allocator()->init(TensorInfo(shape_interleaved, 1, input->info()->data_type()));
+
+    // Initialize output tensor for transpose 1xW
+    TensorShape shape_transposed1xW(weights->info()->dimension(1) * 4, static_cast<size_t>(std::ceil(weights->info()->dimension(0) / 4.f)));
+    _transpose1xW_output.allocator()->init(TensorInfo(shape_transposed1xW, 1, weights->info()->data_type()));
+
+    // Configure interleave4x4 kernel
+    _interleave4x4_kernel.configure(input, &_interleave4x4_output);
+
+    // Configure transpose 1xW kernel
+    _transpose1xW_kernel.configure(weights, &_transpose1xW_output);
+
+    // Configure matrix multiply kernel
+    _mm_kernel.configure(&_interleave4x4_output, &_transpose1xW_output, output, 1.0f);
+
+    // Allocate the tensors once all the configure methods have been called
+    _interleave4x4_output.allocator()->allocate();
+    _transpose1xW_output.allocator()->allocate();
+}
+
+void CLFullyConnectedLayer::configure_conv_fc_nb(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
+{
+    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));
+
+    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
+
+    // Initialize output tensor for im2col
+    TensorShape shape_im2col;
+    shape_im2col.set(0, weights->info()->dimension(1));
+    shape_im2col.set(1, 1);
+    _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, input->info()->data_type()));
+
+    // Configure im2col kernel
+    _im2col_kernel.configure(input, &_im2col_output, std::make_pair(1, 1), PadStrideInfo(1, 1, 0, 0), false);
+
+    // Configure matrix multiply kernel
+    _mm_kernel.configure(&_im2col_output, weights, output, 1.0f);
+
+    // Allocate the output tensor for im2col once all the configure methods have been called
+    _im2col_output.allocator()->allocate();
+}
+
+void CLFullyConnectedLayer::configure_fc_fc_nb(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
+{
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));
+
+    // Configure matrix multiply kernel
+    _mm_kernel.configure(input, weights, output, 1.0f);
+}
+
+void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
+    ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() != 2);
+
+    const ICLTensor *weights_to_use = weights;
+
+    _is_first_run      = true;
+    _transpose_weights = transpose_weights;
+    _fc_after_conv     = true;
+    _batched_fc_layer  = false;
+    _accumulate_biases = false;
+
     if(biases != nullptr)
     {
-        _acc_biases_kernel.configure(output, biases);
-        _run_acc_biases = true;
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+
+        _accumulate_biases = true;
+
+        // Configure accumulate biases kernel
+        _accumulate_biases_kernel.configure(output, biases);
     }
 
-    _run_func = &CLFullyConnectedLayer::run_fc;
-
-    // Allocate intermediate buffers
-    _weights_transpose.allocator()->allocate();
-}
-
-void CLFullyConnectedLayer::run_conv()
-{
-    _conv_function.run();
-}
-
-void CLFullyConnectedLayer::run_fc()
-{
-    if(_is_first_run)
+    // Check if we need to transpose the weights
+    if(_transpose_weights)
     {
-        _is_first_run = false;
-        CLScheduler::get().enqueue(_transpose_kernel);
+        // Initialize the output tensor for transpose
+        TensorShape shape_transposed(weights->info()->dimension(1), weights->info()->dimension(0));
+        _transpose_output.allocator()->init(TensorInfo(shape_transposed, 1, weights->info()->data_type()));
+        _transpose_kernel.configure(weights, &_transpose_output);
+
+        weights_to_use = &_transpose_output;
     }
 
-    _gemm_function.run();
+    // With the Fully Connected layer we can have 4 different cases:
+    //  1) Convolution layer -> Fully Connected layer without batches
+    //  2) Fully Connected layer -> Fully Connected layer without batches
+    //  3) Convolution layer -> Fully Connected layer with batches
+    //  4) Fully Connected layer -> Fully Connected layer with batches
 
-    if(_run_acc_biases)
+    // Check if we have a fully connected layer with batches
+    _batched_fc_layer = (output->info()->dimension(1) > 1);
+
+    if(_batched_fc_layer)
     {
-        CLScheduler::get().enqueue(_acc_biases_kernel);
+        _fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
+                                                                               input->info()->tensor_shape().cend(),
+                                                                               output->info()->tensor_shape().cbegin() + 1));
+
+        if(_fc_after_conv)
+        {
+            // Fully Connected layer after a Convolution Layer with batches
+            configure_conv_fc_wb(input, weights_to_use, output);
+        }
+        else
+        {
+            // Fully Connected layer after a Fully Connected Layer with batches
+            configure_fc_fc_wb(input, weights_to_use, output);
+        }
+    }
+    else
+    {
+        _fc_after_conv = (weights_to_use->info()->dimension(1) == (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2)));
+
+        if(_fc_after_conv)
+        {
+            // Fully Connected layer after a Convolution Layer without batches
+            configure_conv_fc_nb(input, weights_to_use, output);
+        }
+        else
+        {
+            // Fully Connected layer after a Fully Connected Layer without batches
+            configure_fc_fc_nb(input, weights_to_use, output);
+        }
+    }
+
+    // Allocate the transpose tensor if the transpose_weights flag is true and once all the configure methods have been called
+    if(_transpose_weights)
+    {
+        _transpose_output.allocator()->allocate();
     }
 }
 
 void CLFullyConnectedLayer::run()
 {
-    ARM_COMPUTE_ERROR_ON(_run_func == nullptr);
-    (this->*_run_func)();
+    // The reshape of the weights happens only once
+    if(_is_first_run)
+    {
+        _is_first_run = false;
+
+        if(_transpose_weights)
+        {
+            CLScheduler::get().enqueue(_transpose_kernel);
+        }
+
+        if(_batched_fc_layer)
+        {
+            CLScheduler::get().enqueue(_transpose1xW_kernel);
+        }
+    }
+
+    // Linearize input if it comes from a convolutional layer
+    if(_fc_after_conv)
+    {
+        CLScheduler::get().enqueue(_im2col_kernel, false);
+    }
+
+    // Interleave input
+    if(_batched_fc_layer)
+    {
+        CLScheduler::get().enqueue(_interleave4x4_kernel, false);
+    }
+
+    // Run matrix multiply
+    CLScheduler::get().enqueue(_mm_kernel, !_accumulate_biases);
+
+    // Accumulate biases if provided
+    if(_accumulate_biases)
+    {
+        CLScheduler::get().enqueue(_accumulate_biases_kernel);
+    }
 }
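
Note: the rewritten `configure()` chooses between the four cases listed in the hunk purely from tensor shapes: the layer follows a convolution when the weights expect the flattened width*height*channels of the input (so im2col must linearise it), and it is batched when the output has more than one row. A standalone sketch of that decision and of the interleave-4x4 / transpose-1xW shape arithmetic is below; the shapes and the batch size are hypothetical examples, and the sketch only mirrors the shape logic, not the kernels themselves.

    #include <array>
    #include <cmath>
    #include <cstdio>

    using Shape3 = std::array<unsigned, 3>; // {dim0, dim1, dim2}

    int main()
    {
        // Hypothetical shapes: 7x7x64 feature maps in, 1024 outputs,
        // weights already transposed to 1024 x (7*7*64).
        const Shape3 input   = { 7, 7, 64 };
        const Shape3 weights = { 1024, 7 * 7 * 64, 1 };
        const Shape3 output  = { 1024, 1, 1 };

        // Batched if the output has more than one row (one row per batch element).
        const bool batched_fc_layer = output[1] > 1;

        // After a convolution if the weights expect the flattened w*h*c of the input.
        const bool fc_after_conv = weights[1] == input[0] * input[1] * input[2];

        std::printf("case: %s, %s batches\n",
                    fc_after_conv ? "conv -> fc" : "fc -> fc",
                    batched_fc_layer ? "with" : "without");

        // On the batched (GEMM) path the left-hand side (one linearised input per row) is
        // interleaved 4x4 and the weights are transposed 1xW, matching the arithmetic in the hunk.
        const unsigned lhs_rows = 8;          // hypothetical batch of 8
        const unsigned lhs_cols = weights[1];

        const unsigned interleaved_w   = lhs_cols * 4;
        const unsigned interleaved_h   = static_cast<unsigned>(std::ceil(lhs_rows / 4.f));
        const unsigned transposed1xW_w = weights[1] * 4;
        const unsigned transposed1xW_h = static_cast<unsigned>(std::ceil(weights[0] / 4.f));

        std::printf("interleave4x4: %ux%u, transpose1xW: %ux%u\n",
                    interleaved_w, interleaved_h, transposed1xW_w, transposed1xW_h);
        return 0;
    }
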
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index 89d376a..8a4279e 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -34,7 +34,6 @@
 #include "arm_compute/core/Window.h"
 
 #include "arm_compute/runtime/CL/CLPyramid.h"
-#include "arm_compute/runtime/CL/CLPyramid.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensorAllocator.h"
@@ -79,23 +78,22 @@
         TensorShape tensor_shape = pyramid->info()->tensor_shape();
         tensor_shape.set(0, (pyramid->info()->width() + 1) * SCALE_PYRAMID_HALF);
 
-        PyramidInfo pyramid_info;
-        pyramid_info.init(num_levels - 1, SCALE_PYRAMID_HALF, tensor_shape, Format::U16);
+        PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_HALF, tensor_shape, Format::U16);
 
-        _tmp.init_auto_padding(pyramid_info);
-        _tmp.allocate();
+        _tmp.init(pyramid_info);
 
         for(size_t i = 0; i < num_levels - 1; ++i)
         {
-            /* Configure border */
-            _border_handler[i].configure(_pyramid->get_pyramid_level(i), 2, border_mode, PixelValue(constant_border_value));
-
             /* Configure horizontal kernel */
             _horizontal_reduction[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode == BorderMode::UNDEFINED);
 
             /* Configure vertical kernel */
             _vertical_reduction[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), border_mode == BorderMode::UNDEFINED);
+
+            /* Configure border */
+            _border_handler[i].configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i].border_size(), border_mode, PixelValue(constant_border_value));
         }
+        _tmp.allocate();
     }
 }
 
@@ -146,11 +144,9 @@
         _gauss5x5      = arm_compute::cpp14::make_unique<CLGaussian5x5[]>(num_levels - 1);
         _scale_nearest = arm_compute::cpp14::make_unique<CLScaleKernel[]>(num_levels - 1);
 
-        PyramidInfo pyramid_info;
-        pyramid_info.init(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
+        PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
 
-        _tmp.init_auto_padding(pyramid_info);
-        _tmp.allocate();
+        _tmp.init(pyramid_info);
 
         for(size_t i = 0; i < num_levels - 1; ++i)
         {
@@ -160,6 +156,8 @@
             /* Configure scale image kernel */
             _scale_nearest[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), InterpolationPolicy::NEAREST_NEIGHBOR, border_mode == BorderMode::UNDEFINED);
         }
+
+        _tmp.allocate();
     }
 }
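
Note: in the half-scale path above, the temporary pyramid is created with width `(width + 1) * SCALE_PYRAMID_HALF`, i.e. half the input width rounded up for odd widths, and each further level of a half pyramid halves the previous one. A small sketch of that sizing, assuming plain truncating halving per level (the exact rounding inside `PyramidInfo` is not reproduced here):

    #include <cstdio>

    int main()
    {
        const unsigned width = 641, height = 481, num_levels = 4;
        const float    scale = 0.5f; // SCALE_PYRAMID_HALF

        // Width of the intermediate (horizontally reduced) tensor: (w + 1) * 0.5
        // rounds up for odd widths instead of dropping a column.
        const unsigned tmp_width = static_cast<unsigned>((width + 1) * scale);
        std::printf("tmp width: %u\n", tmp_width); // 321

        unsigned w = width, h = height;
        for(unsigned level = 0; level < num_levels; ++level)
        {
            std::printf("level %u: %ux%u\n", level, w, h);
            w = static_cast<unsigned>(w * scale);
            h = static_cast<unsigned>(h * scale);
        }
        return 0;
    }
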
 
diff --git a/src/runtime/CL/functions/CLHarrisCorners.cpp b/src/runtime/CL/functions/CLHarrisCorners.cpp
index 2cdca13..6501da3 100644
--- a/src/runtime/CL/functions/CLHarrisCorners.cpp
+++ b/src/runtime/CL/functions/CLHarrisCorners.cpp
@@ -68,6 +68,7 @@
     TensorInfo info_f32(shape, 1, DataType::F32);
     _score.allocator()->init(info_f32);
     _nonmax.allocator()->init(info_f32);
+
     _corners_list = arm_compute::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
 
     /* Set/init Sobel kernel accordingly with gradient_size */
diff --git a/src/runtime/CL/functions/CLLaplacianPyramid.cpp b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
index 3ed6376..d7ce206 100644
--- a/src/runtime/CL/functions/CLLaplacianPyramid.cpp
+++ b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
@@ -58,10 +58,8 @@
     PyramidInfo pyramid_info;
     pyramid_info.init(_num_levels, 0.5f, pyramid->info()->tensor_shape(), arm_compute::Format::U8);
 
-    _gauss_pyr.init_auto_padding(pyramid_info);
-    _gauss_pyr.allocate();
-    _conv_pyr.init_auto_padding(pyramid_info);
-    _conv_pyr.allocate();
+    _gauss_pyr.init(pyramid_info);
+    _conv_pyr.init(pyramid_info);
 
     // Create Gaussian Pyramid function
     _gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
@@ -76,6 +74,9 @@
     }
 
     _depth_function.configure(_conv_pyr.get_pyramid_level(_num_levels - 1), output, ConvertPolicy::WRAP, 0);
+
+    _gauss_pyr.allocate();
+    _conv_pyr.allocate();
 }
 
 void CLLaplacianPyramid::run()
diff --git a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
index d7725b3..1dfab74 100644
--- a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
+++ b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
@@ -57,8 +57,7 @@
     // Create and initialize the tmp pyramid: I(n-2) = upsample( input + Laplace(n-1) )
     PyramidInfo pyramid_info;
     pyramid_info.init(num_levels, 0.5f, output->info()->tensor_shape(), arm_compute::Format::S16);
-    _tmp_pyr.init_auto_padding(pyramid_info);
-    _tmp_pyr.allocate();
+    _tmp_pyr.init(pyramid_info);
 
     // Allocate add and scale functions. Level 0 does not need to be scaled.
     _addf   = arm_compute::cpp14::make_unique<CLArithmeticAddition[]>(num_levels);
@@ -77,6 +76,8 @@
 
     // Convert level 0 from S16 to U8
     _depthf.configure(_tmp_pyr.get_pyramid_level(0), output, ConvertPolicy::SATURATE, 0);
+
+    _tmp_pyr.allocate();
 }
 
 void CLLaplacianReconstruct::run()
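
Note: the comment at the top of this file's diff gives the recurrence the function implements: the coarsest Laplacian level is added to the input, the result is upsampled and added to the next level, and so on down to level 0, which is finally converted from S16 back to U8. The sketch below only prints that order of operations for a hypothetical `num_levels`; the actual work is done by the CL addition, scale and depth-convert kernels.

    #include <cstdio>

    int main()
    {
        const int num_levels = 4;

        // Coarsest level: combine the low-resolution input with the last Laplacian level.
        std::printf("tmp[%d] = input + laplace[%d]\n", num_levels - 1, num_levels - 1);

        // Walk down the pyramid: upsample the running reconstruction, add the next level.
        for(int l = num_levels - 2; l >= 0; --l)
        {
            std::printf("tmp[%d] = upsample(tmp[%d]) + laplace[%d]\n", l, l + 1, l);
        }

        // Level 0 is not scaled again; it is only converted back to U8.
        std::printf("output = convert_S16_to_U8(tmp[0])\n");
        return 0;
    }
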
diff --git a/src/runtime/CL/functions/CLOpticalFlow.cpp b/src/runtime/CL/functions/CLOpticalFlow.cpp
index 76124f4..a6b0eb3 100644
--- a/src/runtime/CL/functions/CLOpticalFlow.cpp
+++ b/src/runtime/CL/functions/CLOpticalFlow.cpp
@@ -65,9 +65,9 @@
     _new_points           = new_points;
     _num_levels           = old_pyramid->info()->num_levels();
 
-    const float pyr_scale     = old_pyramid->info()->scale();
-    const int   border_offset = (BorderMode::UNDEFINED == border_mode) ? 1 : 0;
-    const int   list_length   = old_points->num_values();
+    const float pyr_scale              = old_pyramid->info()->scale();
+    const int   list_length            = old_points->num_values();
+    const int   old_values_list_length = list_length * window_dimension * window_dimension;
 
     // Create kernels and tensors
     _tracker_init_kernel   = arm_compute::cpp14::make_unique<CLLKTrackerInitKernel[]>(_num_levels);
@@ -84,8 +84,8 @@
     _new_points_internal->resize(list_length);
     _coefficient_table = arm_compute::cpp14::make_unique<CLCoefficientTableArray>(list_length);
     _coefficient_table->resize(list_length);
-    _old_values = arm_compute::cpp14::make_unique<CLOldValueArray>(list_length * window_dimension * window_dimension);
-    _old_values->resize(list_length);
+    _old_values = arm_compute::cpp14::make_unique<CLOldValueArray>(old_values_list_length);
+    _old_values->resize(old_values_list_length);
     _new_points->resize(list_length);
 
     for(size_t i = 0; i < _num_levels; ++i)
@@ -98,13 +98,10 @@
         const unsigned int width_ith  = old_ith_input->info()->dimension(0);
         const unsigned int height_ith = new_ith_input->info()->dimension(1);
 
-        // Allocate Scharr tensors
+        // Initialize Scharr tensors
         TensorInfo tensor_info(TensorShape(width_ith, height_ith), 1, DataType::S16);
-        tensor_info.auto_padding();
         _scharr_gx[i].allocator()->init(tensor_info);
-        _scharr_gx[i].allocator()->allocate();
         _scharr_gy[i].allocator()->init(tensor_info);
-        _scharr_gy[i].allocator()->allocate();
 
         // Init Scharr kernel
         _func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
@@ -115,11 +112,15 @@
         // Init Lucas-Kanade stage0 kernel
         _tracker_stage0_kernel[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i],
                                             _old_points_internal.get(), _new_points_internal.get(), _coefficient_table.get(), _old_values.get(),
-                                            window_dimension, i, border_offset);
+                                            window_dimension, i);
 
         // Init Lucas-Kanade stage1 kernel
         _tracker_stage1_kernel[i].configure(new_ith_input, _new_points_internal.get(), _coefficient_table.get(), _old_values.get(),
-                                            termination, epsilon, num_iterations, window_dimension, i, border_offset);
+                                            termination, epsilon, num_iterations, window_dimension, i);
+
+        // Allocate intermediate buffers
+        _scharr_gx[i].allocator()->allocate();
+        _scharr_gy[i].allocator()->allocate();
     }
 
     // Finalize Lucas-Kanade
@@ -144,5 +145,6 @@
         // Run Lucas-Kanade stage1 kernel
         CLScheduler::get().enqueue(_tracker_stage1_kernel[level - 1]);
     }
+
     CLScheduler::get().enqueue(_tracker_finalize_kernel, true);
 }
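
Note: the resize fix gives `_old_values` one entry per pixel of each keypoint's tracking window (going by the `list_length * window_dimension * window_dimension` sizing) instead of the single entry per keypoint that the old `resize(list_length)` left usable. A trivial arithmetic sketch with hypothetical numbers:

    #include <cstdio>

    int main()
    {
        // Hypothetical tracking setup: 100 keypoints, 7x7 tracking window.
        const int list_length      = 100;
        const int window_dimension = 7;

        // One stored "old value" per window pixel per keypoint.
        const int old_values_list_length = list_length * window_dimension * window_dimension;

        std::printf("keypoints: %d, window: %dx%d, old values: %d\n",
                    list_length, window_dimension, window_dimension, old_values_list_length);
        return 0;
    }
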