arm_compute v19.05
diff --git a/examples/SConscript b/examples/SConscript
index 7af8a7f..d08aa9d 100644
--- a/examples/SConscript
+++ b/examples/SConscript
@@ -58,13 +58,13 @@
 
     if env['os'] in ['android', 'bare_metal'] or env['standalone']:
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils, graph_utils], LIBS = examples_libs + arm_compute_graph_libs, LINKFLAGS=examples_env["LINKFLAGS"]+['-Wl,--whole-archive',graph_dependency,'-Wl,--no-whole-archive'])
-        prog = install_bin(prog)
         Depends(prog, graph_dependency)
+        prog = install_bin(prog)
     else:
         #-Wl,--allow-shlib-undefined: Ignore dependencies of dependencies
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils, graph_utils], LIBS = examples_libs + arm_compute_graph_libs, LINKFLAGS=examples_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
-        prog = install_bin(prog)
         Depends(prog, graph_dependency)
+        prog = install_bin(prog)
     alias = examples_env.Alias(example, prog)
     Default(alias)
 
@@ -72,8 +72,8 @@
     for file in Glob("./neoncl_*.cpp"):
         example = os.path.basename(os.path.splitext(str(file))[0])
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils], CPPDEFINES=['ARM_COMPUTE_CL'], LIBS = examples_libs + arm_compute_libs)
-        prog = install_bin(prog)
         Depends(prog, arm_compute_dependency)
+        prog = install_bin(prog)
         alias = examples_env.Alias(example, prog)
         Default(alias)
 
@@ -81,8 +81,8 @@
     for file in Glob("./cl_*.cpp"):
         example = os.path.basename(os.path.splitext(str(file))[0])
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils], CPPDEFINES=['ARM_COMPUTE_CL'], LIBS = examples_libs + arm_compute_libs)
-        prog = install_bin(prog)
         Depends(prog, arm_compute_dependency)
+        prog = install_bin(prog)
         alias = examples_env.Alias(example, prog)
         Default(alias)
 
@@ -90,8 +90,8 @@
     for file in Glob("./neon_*.cpp"):
         example = os.path.basename(os.path.splitext(str(file))[0])
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils], LIBS = examples_libs + arm_compute_libs)
-        prog = install_bin(prog)
         Depends(prog, arm_compute_dependency)
+        prog = install_bin(prog)
         alias = examples_env.Alias(example, prog)
         Default(alias)
 
@@ -99,8 +99,8 @@
     for file in Glob("./gc_*.cpp"):
         example = os.path.basename(os.path.splitext(str(file))[0])
         prog = examples_env.Program(example, ["{}.cpp".format(example), utils], CPPDEFINES=['ARM_COMPUTE_GC'], LIBS = examples_libs + arm_compute_libs)
-        prog = install_bin(prog)
         Depends(prog, arm_compute_dependency)
+        prog = install_bin(prog)
         alias = examples_env.Alias(example, prog)
         Default(alias)
 
@@ -111,12 +111,12 @@
 
     if env['os'] in ['android', 'bare_metal'] or env['standalone']:
         prog = examples_env.Program(example, [examples_env.Object(source=file, target=example), utils, graph_utils], LIBS = examples_libs + arm_compute_graph_libs, LINKFLAGS=examples_env["LINKFLAGS"]+['-Wl,--whole-archive',graph_dependency,'-Wl,--no-whole-archive'])
-        prog = install_bin(prog)
         Depends(prog, graph_dependency)
+        prog = install_bin(prog)
     else:
         #-Wl,--allow-shlib-undefined: Ignore dependencies of dependencies
         prog = examples_env.Program(example, [examples_env.Object(source=file, target=example), utils, graph_utils], LIBS = examples_libs + arm_compute_graph_libs, LINKFLAGS=examples_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
-        prog = install_bin(prog)
         Depends(prog, graph_dependency)
+        prog = install_bin(prog)
     alias = examples_env.Alias(example, prog)
     Default(alias)
diff --git a/examples/cl_convolution.cpp b/examples/cl_convolution.cpp
index b15bbb6..f2d19ef 100644
--- a/examples/cl_convolution.cpp
+++ b/examples/cl_convolution.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,7 +36,7 @@
 
 /** Gaussian 3x3 matrix
  */
-const int16_t gaussian3x3[] =
+const std::array<int16_t, 9> gaussian3x3 =
 {
     1, 2, 1,
     2, 4, 2,
@@ -45,7 +45,7 @@
 
 /** Gaussian 5x5 matrix
  */
-const int16_t gaussian5x5[] =
+const std::array<int16_t, 25> gaussian5x5 =
 {
     1, 4, 6, 4, 1,
     4, 16, 24, 16, 4,
@@ -82,8 +82,8 @@
         dst.allocator()->init(*src.info());
 
         // Apply a Gaussian 3x3 filter to the source image followed by a Gaussian 5x5:
-        conv3x3.configure(&src, &tmp, gaussian3x3, 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
-        conv5x5.configure(&tmp, &dst, gaussian5x5, 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
+        conv3x3.configure(&src, &tmp, gaussian3x3.data(), 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
+        conv5x5.configure(&tmp, &dst, gaussian5x5.data(), 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
 
         // Allocate all the images
         src.allocator()->allocate();
@@ -115,7 +115,11 @@
             save_to_ppm(dst, output_filename); // save_to_ppm maps and unmaps the image to store as PPM
         }
     }
-    CLImage          src{}, tmp{}, dst{};
+
+private:
+    CLImage          src{};
+    CLImage          tmp{};
+    CLImage          dst{};
     CLConvolution3x3 conv3x3{};
     CLConvolution5x5 conv5x5{};
     std::string      output_filename{};
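
A minimal standalone sketch of the std::array pattern used in the convolution examples above (it does not use arm_compute; sum_coefficients is a hypothetical stand-in for a configure()-style call that consumes a raw coefficient pointer): std::array stores its elements contiguously, so .data() exposes the same const int16_t * that the raw C array previously decayed to, and the call sites stay otherwise unchanged.

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Coefficients as in the examples above, with the element count encoded in the type.
    const std::array<int16_t, 9> gaussian3x3 =
    {
        1, 2, 1,
        2, 4, 2,
        1, 2, 1
    };

    // Hypothetical stand-in for an API such as CLConvolution3x3::configure() that
    // takes a raw const int16_t * plus an element count.
    static int32_t sum_coefficients(const int16_t *conv, std::size_t count)
    {
        int32_t sum = 0;
        for(std::size_t i = 0; i < count; ++i)
        {
            sum += conv[i];
        }
        return sum;
    }

    int main()
    {
        // .data() returns a pointer to the contiguous storage, so raw-pointer APIs keep working.
        std::printf("3x3 Gaussian coefficient sum: %d\n",
                    sum_coefficients(gaussian3x3.data(), gaussian3x3.size()));
        return 0;
    }
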
diff --git a/examples/cl_sgemm.cpp b/examples/cl_sgemm.cpp
index 805aec1..8e0263d 100644
--- a/examples/cl_sgemm.cpp
+++ b/examples/cl_sgemm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,7 +41,9 @@
 public:
     bool do_setup(int argc, char **argv) override
     {
-        NPYLoader npy0, npy1, npy2;
+        NPYLoader npy0;
+        NPYLoader npy1;
+        NPYLoader npy2;
         alpha = 1.0f;
         beta  = 0.0f;
 
@@ -184,7 +186,10 @@
     }
 
 private:
-    CLTensor    src0{}, src1{}, src2{}, dst{};
+    CLTensor    src0{};
+    CLTensor    src1{};
+    CLTensor    src2{};
+    CLTensor    dst{};
     CLGEMM      sgemm{};
     CLTuner     tuner{};
     float       alpha{}, beta{};
diff --git a/examples/gc_absdiff.cpp b/examples/gc_absdiff.cpp
index f534592..6793df0 100644
--- a/examples/gc_absdiff.cpp
+++ b/examples/gc_absdiff.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -40,7 +40,8 @@
 public:
     bool do_setup(int argc, char **argv) override
     {
-        PPMLoader ppm1, ppm2;
+        PPMLoader ppm1{};
+        PPMLoader ppm2{};
 
         GCScheduler::get().default_init();
         if(argc < 2)
diff --git a/examples/gc_dc.cpp b/examples/gc_dc.cpp
index f3f1942..6d09eba 100644
--- a/examples/gc_dc.cpp
+++ b/examples/gc_dc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -81,7 +81,7 @@
         Window window;
         window.use_tensor_dimensions(src_shape);
         Iterator it(&src, window);
-        execute_window_loop(window, [&](const Coordinates & id)
+        execute_window_loop(window, [&](const Coordinates &)
         {
             *reinterpret_cast<half_float::half *>(it.ptr()) = half_float::half(1.f);
         });
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 989e232..a785dea 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -150,6 +150,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_deepspeech_v0_4_1.cpp b/examples/graph_deepspeech_v0_4_1.cpp
new file mode 100644
index 0000000..a69d235
--- /dev/null
+++ b/examples/graph_deepspeech_v0_4_1.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+#include "arm_compute/graph/Types.h"
+#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute::utils;
+using namespace arm_compute::graph;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+
+/** Example demonstrating how to implement DeepSpeech v0.4.1's network using the Compute Library's graph API */
+class GraphDeepSpeechExample : public Example
+{
+public:
+    GraphDeepSpeechExample()
+        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "DeepSpeech v0.4.1")
+    {
+    }
+    bool do_setup(int argc, char **argv) override
+    {
+        // Parse arguments
+        cmd_parser.parse(argc, argv);
+
+        // Consume common parameters
+        common_params = consume_common_graph_parameters(common_opts);
+
+        // Return when help menu is requested
+        if(common_params.help)
+        {
+            cmd_parser.print_help(argv[0]);
+            return false;
+        }
+
+        // Checks
+        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
+
+        // Print parameter values
+        std::cout << common_params << std::endl;
+
+        // Get trainable parameters data path
+        std::string       data_path  = common_params.data_path;
+        const std::string model_path = "/cnn_data/deepspeech_model/";
+
+        if(!data_path.empty())
+        {
+            data_path += model_path;
+        }
+
+        // How many timesteps to process at once, higher values mean more latency
+        // Notice that this corresponds to the number of LSTM cells that will be instantiated
+        const unsigned int n_steps = 16;
+
+        // ReLU clipping value for non-recurrent layers
+        const float cell_clip = 20.f;
+
+        // Create input descriptor
+        const TensorShape tensor_shape     = permute_shape(TensorShape(26U, 19U, n_steps, 1U), DataLayout::NHWC, common_params.data_layout);
+        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+        // Set weights trained layout
+        const DataLayout weights_layout = DataLayout::NHWC;
+
+        graph << common_params.target
+              << common_params.fast_math_hint
+              << InputLayer(input_descriptor,
+                            get_weights_accessor(data_path, "input_values_x" + std::to_string(n_steps) + ".npy", weights_layout))
+              .set_name("input_node");
+
+        if(common_params.data_layout == DataLayout::NCHW)
+        {
+            graph << PermuteLayer(PermutationVector(2U, 0U, 1U), common_params.data_layout).set_name("permute_to_nhwc");
+        }
+
+        graph << ReshapeLayer(TensorShape(494U, n_steps)).set_name("Reshape_input")
+              // Layer 1
+              << FullyConnectedLayer(
+                  2048U,
+                  get_weights_accessor(data_path, "h1_transpose.npy", weights_layout),
+                  get_weights_accessor(data_path, "MatMul_bias.npy"))
+              .set_name("fc0")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
+              .set_name("Relu")
+              // Layer 2
+              << FullyConnectedLayer(
+                  2048U,
+                  get_weights_accessor(data_path, "h2_transpose.npy", weights_layout),
+                  get_weights_accessor(data_path, "MatMul_1_bias.npy"))
+              .set_name("fc1")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
+              .set_name("Relu_1")
+              // Layer 3
+              << FullyConnectedLayer(
+                  2048U,
+                  get_weights_accessor(data_path, "h3_transpose.npy", weights_layout),
+                  get_weights_accessor(data_path, "MatMul_2_bias.npy"))
+              .set_name("fc2")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
+              .set_name("Relu_2")
+              // Layer 4
+              << ReshapeLayer(TensorShape(2048U, 1U, n_steps)).set_name("Reshape_1");
+
+        // Unstack Layer (using SplitLayerNode)
+        NodeParams unstack_params = { "unstack", graph.hints().target_hint };
+        NodeID     unstack_nid    = GraphBuilder::add_split_node(graph.graph(), unstack_params, { graph.tail_node(), 0 }, n_steps, 2);
+
+        // Create input state descriptor
+        TensorDescriptor state_descriptor = TensorDescriptor(TensorShape(2048U), common_params.data_type).set_layout(common_params.data_layout);
+        SubStream        previous_state(graph);
+        SubStream        add_y(graph);
+
+        // Initial state for LSTM is all zeroes for both state_h and state_c, therefore only one input is created
+        previous_state << InputLayer(state_descriptor,
+                                     get_weights_accessor(data_path, "zeros.npy"))
+                       .set_name("previous_state_c_h");
+        add_y << InputLayer(state_descriptor,
+                            get_weights_accessor(data_path, "ones.npy"))
+              .set_name("add_y");
+
+        // TODO(COMPMID-2103): Use sub stream for FC weights and bias in LSTM cells
+        // Create LSTM Fully Connected weights and bias descriptors
+        //TensorDescriptor lstm_weights_descriptor = TensorDescriptor(TensorShape(4096U, 8192U), common_params.data_type).set_layout(common_params.data_layout);
+        //TensorDescriptor lstm_bias_descriptor    = TensorDescriptor(TensorShape(8192U), common_params.data_type).set_layout(common_params.data_layout);
+        //SubStream        lstm_fc_weights(graph);
+        //SubStream        lstm_fc_bias(graph);
+
+        //lstm_fc_weights << InputLayer(lstm_weights_descriptor,
+        //                              get_weights_accessor(data_path, "rnn_lstm_cell_kernel_transpose.npy", weights_layout))
+        //                .set_name("h5/transpose");
+        //lstm_fc_bias << InputLayer(lstm_bias_descriptor,
+        //                           get_weights_accessor(data_path, "rnn_lstm_cell_MatMul_bias.npy"))
+        //             .set_name("MatMul_3_bias");
+
+        // LSTM Block
+        std::pair<SubStream, SubStream> new_state_1  = add_lstm_cell(data_path, unstack_nid, 0, previous_state, previous_state, add_y);
+        std::pair<SubStream, SubStream> new_state_2  = add_lstm_cell(data_path, unstack_nid, 1, new_state_1.first, new_state_1.second, add_y);
+        std::pair<SubStream, SubStream> new_state_3  = add_lstm_cell(data_path, unstack_nid, 2, new_state_2.first, new_state_2.second, add_y);
+        std::pair<SubStream, SubStream> new_state_4  = add_lstm_cell(data_path, unstack_nid, 3, new_state_3.first, new_state_3.second, add_y);
+        std::pair<SubStream, SubStream> new_state_5  = add_lstm_cell(data_path, unstack_nid, 4, new_state_4.first, new_state_4.second, add_y);
+        std::pair<SubStream, SubStream> new_state_6  = add_lstm_cell(data_path, unstack_nid, 5, new_state_5.first, new_state_5.second, add_y);
+        std::pair<SubStream, SubStream> new_state_7  = add_lstm_cell(data_path, unstack_nid, 6, new_state_6.first, new_state_6.second, add_y);
+        std::pair<SubStream, SubStream> new_state_8  = add_lstm_cell(data_path, unstack_nid, 7, new_state_7.first, new_state_7.second, add_y);
+        std::pair<SubStream, SubStream> new_state_9  = add_lstm_cell(data_path, unstack_nid, 8, new_state_8.first, new_state_8.second, add_y);
+        std::pair<SubStream, SubStream> new_state_10 = add_lstm_cell(data_path, unstack_nid, 9, new_state_9.first, new_state_9.second, add_y);
+        std::pair<SubStream, SubStream> new_state_11 = add_lstm_cell(data_path, unstack_nid, 10, new_state_10.first, new_state_10.second, add_y);
+        std::pair<SubStream, SubStream> new_state_12 = add_lstm_cell(data_path, unstack_nid, 11, new_state_11.first, new_state_11.second, add_y);
+        std::pair<SubStream, SubStream> new_state_13 = add_lstm_cell(data_path, unstack_nid, 12, new_state_12.first, new_state_12.second, add_y);
+        std::pair<SubStream, SubStream> new_state_14 = add_lstm_cell(data_path, unstack_nid, 13, new_state_13.first, new_state_13.second, add_y);
+        std::pair<SubStream, SubStream> new_state_15 = add_lstm_cell(data_path, unstack_nid, 14, new_state_14.first, new_state_14.second, add_y);
+        std::pair<SubStream, SubStream> new_state_16 = add_lstm_cell(data_path, unstack_nid, 15, new_state_15.first, new_state_15.second, add_y);
+
+        if(n_steps > 1)
+        {
+            // Concatenate new states on height
+            const int axis = 1;
+            graph << StackLayer(axis,
+                                std::move(new_state_1.second),
+                                std::move(new_state_2.second),
+                                std::move(new_state_3.second),
+                                std::move(new_state_4.second),
+                                std::move(new_state_5.second),
+                                std::move(new_state_6.second),
+                                std::move(new_state_7.second),
+                                std::move(new_state_8.second),
+                                std::move(new_state_9.second),
+                                std::move(new_state_10.second),
+                                std::move(new_state_11.second),
+                                std::move(new_state_12.second),
+                                std::move(new_state_13.second),
+                                std::move(new_state_14.second),
+                                std::move(new_state_15.second),
+                                std::move(new_state_16.second))
+                  .set_name("concat");
+        }
+
+        graph << FullyConnectedLayer(
+                  2048U,
+                  get_weights_accessor(data_path, "h5_transpose.npy", weights_layout),
+                  get_weights_accessor(data_path, "MatMul_3_bias.npy"))
+              .set_name("fc3")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
+              .set_name("Relu3")
+              << FullyConnectedLayer(
+                  29U,
+                  get_weights_accessor(data_path, "h6_transpose.npy", weights_layout),
+                  get_weights_accessor(data_path, "MatMul_4_bias.npy"))
+              .set_name("fc3")
+              << SoftmaxLayer().set_name("logits");
+
+        graph << OutputLayer(get_output_accessor(common_params, 5));
+
+        // Finalize graph
+        GraphConfig config;
+        config.num_threads = common_params.threads;
+        config.use_tuner   = common_params.enable_tuner;
+        config.tuner_file  = common_params.tuner_file;
+
+        graph.finalize(common_params.target, config);
+
+        return true;
+    }
+    void do_run() override
+    {
+        // Run graph
+        graph.run();
+    }
+
+private:
+    CommandLineParser  cmd_parser;
+    CommonGraphOptions common_opts;
+    CommonGraphParams  common_params;
+    Stream             graph;
+
+    Status set_node_params(Graph &g, NodeID nid, NodeParams &params)
+    {
+        INode *node = g.node(nid);
+        ARM_COMPUTE_RETURN_ERROR_ON(!node);
+
+        node->set_common_node_parameters(params);
+
+        return Status{};
+    }
+
+    std::pair<SubStream, SubStream> add_lstm_cell(const std::string &data_path,
+                                                  NodeID       unstack_nid,
+                                                  unsigned int unstack_idx,
+                                                  SubStream    previous_state_c,
+                                                  SubStream    previous_state_h,
+                                                  SubStream    add_y)
+    // TODO(COMPMID-2103): Use sub streams for FC weights and bias
+    //SubStream lstm_fc_weights,
+    //SubStream lstm_fc_bias)
+    {
+        const std::string         cell_name("rnn/lstm_cell_" + std::to_string(unstack_idx));
+        const DataLayoutDimension concat_dim = (common_params.data_layout == DataLayout::NHWC) ? DataLayoutDimension::CHANNEL : DataLayoutDimension::WIDTH;
+
+        // Concatenate result of Unstack with previous_state_h
+        NodeParams concat_params = { cell_name + "/concat", graph.hints().target_hint };
+        NodeID     concat_nid    = graph.graph().add_node<ConcatenateLayerNode>(2, concat_dim);
+        graph.graph().add_connection(unstack_nid, unstack_idx, concat_nid, 0);
+        graph.graph().add_connection(previous_state_h.tail_node(), 0, concat_nid, 1);
+        set_node_params(graph.graph(), concat_nid, concat_params);
+        graph.forward_tail(concat_nid);
+
+        graph << FullyConnectedLayer(
+                  8192U,
+                  get_weights_accessor(data_path, "rnn_lstm_cell_kernel_transpose.npy", DataLayout::NHWC),
+                  get_weights_accessor(data_path, "rnn_lstm_cell_MatMul_bias.npy"))
+              .set_name(cell_name + "/BiasAdd");
+
+        // Split Layer
+        const unsigned int num_splits = 4;
+        const unsigned int split_axis = 0;
+
+        NodeParams split_params = { cell_name + "/split", graph.hints().target_hint };
+        NodeID     split_nid    = GraphBuilder::add_split_node(graph.graph(), split_params, { graph.tail_node(), 0 }, num_splits, split_axis);
+
+        NodeParams sigmoid_1_params = { cell_name + "/Sigmoid_1", graph.hints().target_hint };
+        NodeParams add_params       = { cell_name + "/add", graph.hints().target_hint };
+        NodeParams sigmoid_2_params = { cell_name + "/Sigmoid_2", graph.hints().target_hint };
+        NodeParams tanh_params      = { cell_name + "/Tanh", graph.hints().target_hint };
+
+        // Sigmoid 1 (first split)
+        NodeID sigmoid_1_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
+        graph.graph().add_connection(split_nid, 0, sigmoid_1_nid, 0);
+        set_node_params(graph.graph(), sigmoid_1_nid, sigmoid_1_params);
+
+        // Tanh (second split)
+        NodeID tanh_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f));
+        graph.graph().add_connection(split_nid, 1, tanh_nid, 0);
+        set_node_params(graph.graph(), tanh_nid, tanh_params);
+
+        SubStream tanh_ss(graph);
+        tanh_ss.forward_tail(tanh_nid);
+
+        // Add (third split)
+        NodeID add_nid = graph.graph().add_node<EltwiseLayerNode>(EltwiseOperation::Add);
+        graph.graph().add_connection(split_nid, 2, add_nid, 0);
+        graph.graph().add_connection(add_y.tail_node(), 0, add_nid, 1);
+        set_node_params(graph.graph(), add_nid, add_params);
+
+        // Sigmoid 2 (fourth split)
+        NodeID sigmoid_2_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
+        graph.graph().add_connection(split_nid, 3, sigmoid_2_nid, 0);
+        set_node_params(graph.graph(), sigmoid_2_nid, sigmoid_2_params);
+
+        SubStream sigmoid_1_ss(graph);
+        sigmoid_1_ss.forward_tail(sigmoid_1_nid);
+        SubStream mul_1_ss(sigmoid_1_ss);
+        mul_1_ss << EltwiseLayer(std::move(sigmoid_1_ss), std::move(tanh_ss), EltwiseOperation::Mul)
+                 .set_name(cell_name + "/mul_1");
+
+        SubStream tanh_1_ss_tmp(graph);
+        tanh_1_ss_tmp.forward_tail(add_nid);
+
+        tanh_1_ss_tmp << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC))
+                      .set_name(cell_name + "/Sigmoid");
+        SubStream tanh_1_ss_tmp2(tanh_1_ss_tmp);
+        tanh_1_ss_tmp2 << EltwiseLayer(std::move(tanh_1_ss_tmp), std::move(previous_state_c), EltwiseOperation::Mul)
+                       .set_name(cell_name + "/mul");
+        SubStream tanh_1_ss(tanh_1_ss_tmp2);
+        tanh_1_ss << EltwiseLayer(std::move(tanh_1_ss_tmp2), std::move(mul_1_ss), EltwiseOperation::Add)
+                  .set_name(cell_name + "/new_state_c");
+        SubStream new_state_c(tanh_1_ss);
+
+        tanh_1_ss << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f))
+                  .set_name(cell_name + "/Tanh_1");
+
+        SubStream sigmoid_2_ss(graph);
+        sigmoid_2_ss.forward_tail(sigmoid_2_nid);
+        graph << EltwiseLayer(std::move(sigmoid_2_ss), std::move(tanh_1_ss), EltwiseOperation::Mul)
+              .set_name(cell_name + "/new_state_h");
+
+        SubStream new_state_h(graph);
+        return std::pair<SubStream, SubStream>(new_state_c, new_state_h);
+    }
+};
+
+/** Main program for DeepSpeech v0.4.1
+ *
+ * Model is based on:
+ *      https://arxiv.org/abs/1412.5567
+ *      "Deep Speech: Scaling up end-to-end speech recognition"
+ *      Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro, Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh, Shubho Sengupta, Adam Coates, Andrew Y. Ng
+ *
+ * Provenance: https://github.com/mozilla/DeepSpeech
+ *
+ * @note To list all the possible arguments execute the binary appended with the --help option
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments
+ *
+ * @return Return code
+ */
+int main(int argc, char **argv)
+{
+    return arm_compute::utils::run_example<GraphDeepSpeechExample>(argc, argv);
+}
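
For reference, the tensor-size bookkeeping in the new DeepSpeech example can be checked with a few lines of standalone C++ (a sketch built only from constants that appear in the file above, no library calls): the input provides a 26x19 feature slice per timestep, which the Reshape_input layer flattens to 494 elements per step, and inside each of the 16 LSTM cells the 8192-wide fully connected output is split into four 2048-wide gate activations, matching the 2048-element state descriptor.

    #include <cassert>
    #include <cstdio>

    int main()
    {
        // Constants taken from graph_deepspeech_v0_4_1.cpp above.
        const unsigned int n_steps       = 16;   // timesteps, i.e. LSTM cells instantiated
        const unsigned int feat_w        = 26;   // input feature width per step
        const unsigned int feat_h        = 19;   // input feature height per step
        const unsigned int hidden_units  = 2048; // width of fc0..fc2 and of the LSTM state
        const unsigned int lstm_fc_units = 8192; // output width of the FC inside add_lstm_cell()
        const unsigned int num_splits    = 4;    // gate split inside add_lstm_cell()

        // Reshape_input flattens each step's 26x19 slice into a 494-element row.
        assert(feat_w * feat_h == 494U);

        // The per-cell FC output splits into four gates, each as wide as the state.
        assert(lstm_fc_units / num_splits == hidden_units);

        std::printf("steps=%u, flattened input=%u, gate width=%u\n",
                    n_steps, feat_w * feat_h, lstm_fc_units / num_splits);
        return 0;
    }
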
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index 583ca2c..185680a 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -126,6 +126,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_inception_resnet_v1.cpp b/examples/graph_inception_resnet_v1.cpp
index e99f688..64c35e1 100644
--- a/examples/graph_inception_resnet_v1.cpp
+++ b/examples/graph_inception_resnet_v1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -213,6 +213,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_inception_resnet_v2.cpp b/examples/graph_inception_resnet_v2.cpp
index 8e79978..921fada 100644
--- a/examples/graph_inception_resnet_v2.cpp
+++ b/examples/graph_inception_resnet_v2.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -192,6 +192,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index 517e492..0a1e312 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -200,6 +200,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index 0b0360a..3ea2b2f 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -151,6 +151,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 79cf122..c75a2f8 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -107,6 +107,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 10bb890..e2e5eb9 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -78,7 +78,7 @@
 
         // Set graph hints
         graph << common_params.target
-              << DepthwiseConvolutionMethod::Optimized3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+              << DepthwiseConvolutionMethod::Optimized3x3 // TODO(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
               << common_params.fast_math_hint;
 
         // Create core graph
@@ -100,6 +100,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_mobilenet_v2.cpp b/examples/graph_mobilenet_v2.cpp
index 429a3d2..25690aa 100644
--- a/examples/graph_mobilenet_v2.cpp
+++ b/examples/graph_mobilenet_v2.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -70,7 +70,7 @@
 
         // Set graph hints
         graph << common_params.target
-              << DepthwiseConvolutionMethod::Optimized3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+              << DepthwiseConvolutionMethod::Optimized3x3 // TODO(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
               << common_params.fast_math_hint;
 
         // Create core graph
@@ -91,6 +91,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
@@ -263,7 +264,7 @@
     void create_graph_qasymm8(TensorDescriptor &input_descriptor)
     {
         // Create model path
-        const std::string model_path = "/cnn_data/mobilenet_v2_1.0_224_quantized_model";
+        const std::string model_path = "/cnn_data/mobilenet_v2_1.0_224_quantized_model/";
 
         // Get trainable parameters data path
         std::string data_path = common_params.data_path;
diff --git a/examples/graph_resnet12.cpp b/examples/graph_resnet12.cpp
index 5912863..db70b53 100644
--- a/examples/graph_resnet12.cpp
+++ b/examples/graph_resnet12.cpp
@@ -135,6 +135,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index b6e20d6..7c9b95e 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -114,6 +114,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_resnet_v2_50.cpp b/examples/graph_resnet_v2_50.cpp
index 77807b8..78845a8 100644
--- a/examples/graph_resnet_v2_50.cpp
+++ b/examples/graph_resnet_v2_50.cpp
@@ -117,6 +117,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_resnext50.cpp b/examples/graph_resnext50.cpp
index 8b33f90..766b8ff 100644
--- a/examples/graph_resnext50.cpp
+++ b/examples/graph_resnext50.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -98,6 +98,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_shufflenet.cpp b/examples/graph_shufflenet.cpp
index e6016f0..3704be7 100644
--- a/examples/graph_shufflenet.cpp
+++ b/examples/graph_shufflenet.cpp
@@ -144,6 +144,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index f78fe5d..4796dd3 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -167,6 +167,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index 22a15df..fd4561f 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -167,6 +167,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_srcnn955.cpp b/examples/graph_srcnn955.cpp
index a8976a1..066f16e 100644
--- a/examples/graph_srcnn955.cpp
+++ b/examples/graph_srcnn955.cpp
@@ -121,6 +121,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_ssd_mobilenet.cpp b/examples/graph_ssd_mobilenet.cpp
index 780ee38..55c9d75 100644
--- a/examples/graph_ssd_mobilenet.cpp
+++ b/examples/graph_ssd_mobilenet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -72,7 +72,7 @@
 
         // Set graph hints
         graph << common_params.target
-              << DepthwiseConvolutionMethod::Optimized3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+              << DepthwiseConvolutionMethod::Optimized3x3 // TODO(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
               << common_params.fast_math_hint;
 
         // Create core graph
@@ -80,7 +80,7 @@
 
         // Create a preprocessor object
         const std::array<float, 3> mean_rgb{ { 127.5f, 127.5f, 127.5f } };
-        std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb, 0.007843f);
+        std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb, true, 0.007843f);
 
         // Get trainable parameters data path
         std::string data_path = common_params.data_path;
@@ -246,6 +246,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index 290d1e7..e8055d4 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -225,6 +225,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 298ffa0..63051fb 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -236,6 +236,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_vgg_vdsr.cpp b/examples/graph_vgg_vdsr.cpp
index ca7d10f..9f0b357 100644
--- a/examples/graph_vgg_vdsr.cpp
+++ b/examples/graph_vgg_vdsr.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -139,6 +139,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/graph_yolov3.cpp b/examples/graph_yolov3.cpp
index 6d0f67e..c0a97da 100644
--- a/examples/graph_yolov3.cpp
+++ b/examples/graph_yolov3.cpp
@@ -398,6 +398,7 @@
         GraphConfig config;
         config.num_threads = common_params.threads;
         config.use_tuner   = common_params.enable_tuner;
+        config.tuner_mode  = common_params.tuner_mode;
         config.tuner_file  = common_params.tuner_file;
 
         graph.finalize(common_params.target, config);
diff --git a/examples/neon_convolution.cpp b/examples/neon_convolution.cpp
index 1a7e865..56b4ddc 100644
--- a/examples/neon_convolution.cpp
+++ b/examples/neon_convolution.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,7 +32,7 @@
 
 /** Gaussian 3x3 matrix
  */
-const int16_t gaussian3x3[] =
+const std::array<int16_t, 9> gaussian3x3 =
 {
     1, 2, 1,
     2, 4, 2,
@@ -41,7 +41,7 @@
 
 /** Gaussian 5x5 matrix
  */
-const int16_t gaussian5x5[] =
+const std::array<int16_t, 25> gaussian5x5 =
 {
     1, 4, 6, 4, 1,
     4, 16, 24, 16, 4,
@@ -79,8 +79,8 @@
 
         // Apply a Gaussian 3x3 filter to the source image followed by a Gaussian 5x5:
         // The function will automatically update the padding information inside input and output to match its requirements
-        conv3x3.configure(&src, &tmp, gaussian3x3, 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
-        conv5x5.configure(&tmp, &dst, gaussian5x5, 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
+        conv3x3.configure(&src, &tmp, gaussian3x3.data(), 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
+        conv5x5.configure(&tmp, &dst, gaussian5x5.data(), 0 /* Let arm_compute calculate the scale */, BorderMode::UNDEFINED);
 
         // Now that the padding requirements are known we can allocate the images:
         src.allocator()->allocate();
diff --git a/examples/neon_sgemm.cpp b/examples/neon_sgemm.cpp
index f6f93dd..8f395de 100644
--- a/examples/neon_sgemm.cpp
+++ b/examples/neon_sgemm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,7 +36,9 @@
 public:
     bool do_setup(int argc, char **argv) override
     {
-        NPYLoader npy0, npy1, npy2;
+        NPYLoader npy0;
+        NPYLoader npy1;
+        NPYLoader npy2;
         alpha = 1.0f;
         beta  = 0.0f;