arm_compute v17.09

Change-Id: I4bf8f4e6e5f84ce0d5b6f5ba570d276879f42a81
diff --git a/src/graph/CL/CLMap.cpp b/src/graph/CL/CLMap.cpp
new file mode 100644
index 0000000..4892b96
--- /dev/null
+++ b/src/graph/CL/CLMap.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/CL/CLMap.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+using namespace arm_compute::graph;
+
+CLMap::CLMap(Tensor *tensor, bool blocking) // Wraps a CL-backed graph tensor so its buffer can be mapped to the host
+    : _tensor(dynamic_cast<arm_compute::CLTensor *>(tensor->tensor())), _blocking(blocking) // Downcast yields nullptr if the backing tensor is not a CLTensor
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor); // Fail fast when the graph tensor is not OpenCL-backed
+}
+
+void CLMap::run() // Map the CL buffer into host-visible memory as a pipeline step
+{
+    _tensor->map(_blocking); // _blocking selects a synchronous (true) or asynchronous (false) map
+}
diff --git a/src/graph/CL/CLUnmap.cpp b/src/graph/CL/CLUnmap.cpp
new file mode 100644
index 0000000..ec7d865
--- /dev/null
+++ b/src/graph/CL/CLUnmap.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/CL/CLUnmap.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+using namespace arm_compute::graph;
+
+CLUnmap::CLUnmap(Tensor *tensor) // Wraps a CL-backed graph tensor so its host mapping can be released
+    : _tensor(dynamic_cast<arm_compute::CLTensor *>(tensor->tensor())) // Downcast yields nullptr if the backing tensor is not a CLTensor
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor); // Fail fast when the graph tensor is not OpenCL-backed
+}
+
+void CLUnmap::run() // Release the host mapping of the CL buffer as a pipeline step
+{
+    _tensor->unmap(); // Counterpart of CLMap::run(); returns the buffer to the device
+}
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
new file mode 100644
index 0000000..525506f
--- /dev/null
+++ b/src/graph/Graph.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/Graph.h"
+
+#include "arm_compute/graph/CL/CLMap.h"
+#include "arm_compute/graph/CL/CLUnmap.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/Tensor.h"
+
+using namespace arm_compute::graph;
+
+struct Stage // One executable step of the graph pipeline built by Graph::Private::configure()
+{
+    Tensor                                 *_input;  // Non-owning: tensor the stage reads
+    Tensor                                 *_output; // Non-owning: tensor the stage writes (same as _input for map/unmap glue stages)
+    std::unique_ptr<arm_compute::IFunction> _function; // Owning: function executed by Graph::run()
+};
+
+struct Graph::Private // Pimpl state of Graph: owns the pipeline, nodes and tensors
+{
+public:
+    /** Finalizes the current node's configuration
+     *
+     * @param _next_hint Device execution hint of the node that follows the one being finalized
+     */
+    void configure(Hint _next_hint);
+
+    /** Sets whether to enable information print out
+     *
+     * @param[in] is_enabled Set to true if need info printed out
+     */
+    void set_info_enablement(bool is_enabled);
+
+    std::vector<Stage>                   _pipeline{};    // Ordered stages (node functions plus CL map/unmap glue) run each iteration
+    std::vector<std::unique_ptr<Tensor>> _tensors{};     // Owns intermediate tensors created between nodes
+    std::vector<std::unique_ptr<INode>>  _nodes{};       // Owns nodes that have already been finalized
+    Hint                                 _current_hint{ Hint::DONT_CARE }; // Target of the node currently awaiting configuration
+    Hint                                 _next_hint{ Hint::DONT_CARE };    // Target requested (via Graph::set_hint) for the next node
+    std::unique_ptr<Tensor>              _graph_input{ nullptr };  // First tensor added: the graph's input
+    std::unique_ptr<Tensor>              _graph_output{ nullptr }; // Last tensor added: the graph's output
+    std::unique_ptr<INode>               _current_node{ nullptr }; // Node awaiting finalization by configure()
+    Tensor                              *_current_output{ nullptr }; // Non-owning: output of the node being configured
+    bool                                 _info_enabled{ false }; // When true, print_info() is invoked on each finalized node
+
+private:
+    Tensor *_current_input{ nullptr }; // Non-owning: input of the node being configured
+    Hint    _previous_hint{ Hint::DONT_CARE }; // Target of the previously configured node (drives map/unmap insertion)
+};
+
+Graph::~Graph() //NOLINT
+{
+    //Can't use =default because the destructor must be defined after Graph::Private's definition (pimpl idiom: unique_ptr<Private> needs the complete type here)
+}
+
+Graph::Graph() // Allocates the pimpl state; all members of Private are default-initialized in-class
+    : _pimpl{ new Private() }
+{
+}
+
+void Graph::run() // Executes the pipeline repeatedly until an accessor signals the end of the stream
+{
+    while(true)
+    {
+        if(!_pimpl->_graph_input->call_accessor()) // Input accessor fills the input tensor; false means no more data
+        {
+            return;
+        }
+
+        for(auto &stage : _pimpl->_pipeline) // Run every stage (node functions and CL map/unmap glue) in order
+        {
+            stage._function->run();
+        }
+
+        if(!_pimpl->_graph_output->call_accessor()) // Output accessor consumes the result; false stops the loop
+        {
+            return;
+        }
+    }
+}
+
+// Finalize the pending node: pick targets, instantiate its function and append it (plus any CL map/unmap glue) to the pipeline. Parameter renamed from _next_hint: it shadowed the _next_hint member.
+void Graph::Private::configure(Hint next_hint)
+{
+    ARM_COMPUTE_ERROR_ON(_current_node == nullptr);
+    ARM_COMPUTE_ERROR_ON(_graph_input == nullptr);
+
+    // Is it the first node of the graph ?
+    if(_current_input == nullptr)
+    {
+        _graph_input->set_target(_current_hint); // The graph input inherits the first node's target
+        _current_input = _graph_input.get();
+        _previous_hint = _current_hint; // For the first node just assume the previous node was of the same type as this one
+    }
+
+    //Automatic output configuration ?
+    if(_current_output == nullptr)
+    {
+        _tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo())); // Empty TensorInfo: shape is auto-initialized by the node's configure()
+        _current_output = _tensors.back().get();
+    }
+
+    // If either the writer or reader node needs OpenCL then use OpenCL memory:
+    if((next_hint == Hint::OPENCL || _current_hint == Hint::OPENCL))
+    {
+        _current_output->set_target(Hint::OPENCL);
+    }
+    else
+    {
+        _current_output->set_target(Hint::NEON);
+    }
+
+    // Map input if needed
+    std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_current_hint, _current_input->tensor(), _current_output->tensor());
+    _current_input->allocate();
+
+    if(_current_input->target() == Hint::OPENCL) // CL-backed input crossing a NEON boundary needs map/unmap glue stages
+    {
+        if(_previous_hint == Hint::NEON) // NEON wrote via a host mapping; unmap before the CL node reads
+        {
+            ARM_COMPUTE_ERROR_ON(_current_hint == Hint::NEON);
+            _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLUnmap>(_current_input) });
+        }
+        if(_current_hint == Hint::NEON) // NEON reads next; map the CL buffer to the host first (blocking)
+        {
+            ARM_COMPUTE_ERROR_ON(_previous_hint == Hint::NEON);
+            _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLMap>(_current_input, true) });
+        }
+    }
+
+    _pipeline.push_back({ _current_input, _current_output, std::move(func) });
+
+    _current_input  = _current_output; // This node's output becomes the next node's input
+    _current_output = nullptr;
+    _previous_hint  = _current_hint;
+    _current_hint   = next_hint;
+}
+
+void Graph::Private::set_info_enablement(bool is_enabled) // Toggle per-node print_info() output during graph construction
+{
+    _info_enabled = is_enabled;
+}
+
+void Graph::add_node(std::unique_ptr<INode> node) // Takes ownership of a node; finalizes the previously added node (configuration is deferred by one node so each node knows its successor's hint)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_input == nullptr, "The graph's input must be set before the first node is added");
+    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_output != nullptr, "Nothing can be added after the output tensor");
+    //Trigger the creation of the current Node:
+
+    Hint _next_hint = node->override_hint(_pimpl->_next_hint); // May downgrade OPENCL when CL is unavailable, or resolve DONT_CARE
+    ARM_COMPUTE_ERROR_ON(_next_hint == Hint::DONT_CARE);
+    if(_pimpl->_current_node)
+    {
+        //Finalize the previous Node:
+        _pimpl->configure(_pimpl->_next_hint); // NOTE(review): passes the raw _next_hint member, not the overridden local _next_hint computed above — confirm this is intended
+
+        if(_pimpl->_info_enabled)
+        {
+            _pimpl->_current_node->print_info(); // Print only after configure(), once the node's tensors are known
+        }
+    }
+    else
+    {
+        // If that's the first node then use the same Hint before and after the node.
+        _pimpl->_current_hint = _next_hint;
+    }
+    if(_pimpl->_current_node)
+    {
+        _pimpl->_nodes.push_back(std::move(_pimpl->_current_node)); // Keep the finalized node alive for the graph's lifetime
+    }
+    _pimpl->_current_node = std::move(node); // New node becomes the pending one, finalized on the next add_node/add_tensor
+}
+void Graph::set_hint(Hint hint) // Sets the execution target hint applied to subsequently added nodes
+{
+    _pimpl->_next_hint = hint;
+}
+
+void Graph::set_info_enablement(bool is_enabled) // Public forwarder to the pimpl's info-print toggle
+{
+    _pimpl->set_info_enablement(is_enabled);
+}
+
+//Add a tensor with an Accessor (i.e either the input or output of the graph)
+void Graph::add_tensor(std::unique_ptr<Tensor> tensor) // First call sets the graph input; second call sets the output and finalizes the last node
+{
+    // If it's the first Tensor added then it will be the input of the Graph.
+    if(_pimpl->_graph_input == nullptr)
+    {
+        ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
+        ARM_COMPUTE_ERROR_ON(_pimpl->_current_node != nullptr);
+        _pimpl->_graph_input = std::move(tensor);
+    }
+    else
+    {
+        // Else it will be the output of the Graph
+        ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr); // Only one output is allowed
+        ARM_COMPUTE_ERROR_ON(_pimpl->_current_node == nullptr); // At least one node must sit between input and output
+        _pimpl->_graph_output   = std::move(tensor);
+        _pimpl->_current_output = _pimpl->_graph_output.get();
+
+        // Finalize the graph by configuring the last Node of the graph:
+        _pimpl->configure(_pimpl->_current_hint); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
+        _pimpl->_graph_output->allocate(); // configure() allocates inputs only, so allocate the output explicitly
+    }
+}
+
+void Graph::set_temp(TensorInfo &&tmp) // Registers an explicitly-shaped intermediate tensor to be used as the next node's output
+{
+    ARM_COMPUTE_ERROR_ON(_pimpl->_graph_input == nullptr);
+    ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
+    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_current_output != nullptr, "TensorInfo for temporary tensor already set");
+
+    _pimpl->_tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tmp))); // Graph owns the temporary
+    _pimpl->_current_output = _pimpl->_tensors.back().get();
+}
+
+Graph &arm_compute::graph::operator<<(Graph &graph, TensorInfo &&info) // Stream syntax: declares a temporary tensor's shape (forwards to set_temp)
+{
+    graph.set_temp(std::move(info));
+    return graph;
+}
+
+Graph &arm_compute::graph::operator<<(Graph &graph, Tensor &&tensor) // Stream syntax: adds the graph's input (first use) or output (second use)
+{
+    graph.add_tensor(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tensor)));
+    return graph;
+}
+
+Graph &arm_compute::graph::operator<<(Graph &graph, Hint hint) // Stream syntax: sets the target hint for nodes added after this point
+{
+    graph.set_hint(hint);
+    return graph;
+}
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
new file mode 100644
index 0000000..6b25022
--- /dev/null
+++ b/src/graph/INode.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/graph/INode.h"
+
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Validate.h"
+
+#include <ostream>
+
+using namespace arm_compute::graph;
+
+Hint INode::override_hint(Hint hint) const // Resolves the requested hint to one this node can actually execute
+{
+    if(hint == Hint::OPENCL && !opencl_is_available()) // Downgrade OPENCL to DONT_CARE when no CL runtime is present
+    {
+        hint = Hint::DONT_CARE;
+    }
+    hint = node_override_hint(hint); // Node-specific resolution (default maps DONT_CARE to NEON)
+    ARM_COMPUTE_ERROR_ON(hint == Hint::OPENCL && !opencl_is_available()); // A node must never re-introduce an unavailable target
+    return hint;
+}
+Hint INode::node_override_hint(Hint hint) const // Default policy: an undecided hint falls back to NEON
+{
+    return hint == Hint::DONT_CARE ? Hint::NEON : hint;
+}
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
new file mode 100644
index 0000000..c534ae0
--- /dev/null
+++ b/src/graph/Tensor.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/graph/Tensor.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename TensorType>
+std::unique_ptr<ITensor> initialise_tensor(TensorInfo &info) // Creates a backend tensor (CLTensor or NEON Tensor) initialised with info
+{
+    auto tensor = arm_compute::support::cpp14::make_unique<TensorType>();
+    tensor->allocator()->init(info);
+    return std::move(tensor); // Explicit move for the unique_ptr<TensorType> -> unique_ptr<ITensor> conversion
+}
+
+template <typename TensorType>
+void tensor_allocate(ITensor &tensor) // Allocates backing memory via the concrete tensor type's allocator
+{
+    auto itensor = dynamic_cast<TensorType *>(&tensor);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(itensor); // Guards against a target/type mismatch
+    itensor->allocator()->allocate();
+}
+} // namespace
+
+Tensor::Tensor(TensorInfo &&info) // Construct with metadata only; the backend tensor is created later by set_target()
+    : _target(Hint::DONT_CARE), _info(std::move(info)), _accessor(nullptr), _tensor(nullptr) // std::move: the rvalue-reference parameter was previously copied
+{
+}
+
+Tensor::Tensor(Tensor &&src) noexcept // Move constructor: transfers accessor and backend-tensor ownership; src's _target is left unchanged (Hint is trivially copyable)
+    : _target(src._target),
+      _info(std::move(src._info)),
+      _accessor(std::move(src._accessor)),
+      _tensor(std::move(src._tensor))
+{
+}
+
+void Tensor::set_info(TensorInfo &&info) // Replace this tensor's metadata (used for deferred weight/bias shape initialization)
+{
+    _info = std::move(info); // std::move: the rvalue-reference parameter was previously copied
+}
+
+bool Tensor::call_accessor() // Runs the user accessor on the tensor, mapping/unmapping CL memory around the call; returns the accessor's verdict (false = stop the graph)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_accessor.get());
+    auto cl_tensor = dynamic_cast<arm_compute::CLTensor *>(_tensor.get()); // nullptr for NEON tensors
+    if(cl_tensor != nullptr && cl_tensor->buffer() == nullptr) // Map only if not already host-mapped
+    {
+        cl_tensor->map();
+    }
+    bool retval = _accessor->access_tensor(*_tensor);
+    if(cl_tensor != nullptr) // NOTE(review): unmaps even when the buffer was already mapped before this call — confirm callers never rely on a pre-existing mapping
+    {
+        cl_tensor->unmap();
+    }
+    return retval;
+}
+
+ITensor *Tensor::tensor() // Non-owning access to the backend tensor; nullptr until set_target() is called
+{
+    return _tensor.get();
+}
+
+const TensorInfo &Tensor::info() const // Read-only access to the tensor's metadata
+{
+    return _info;
+}
+
+ITensor *Tensor::set_target(Hint target) // Lazily creates the backend tensor for the given target; idempotent for the same target
+{
+    if(_tensor != nullptr)
+    {
+        ARM_COMPUTE_ERROR_ON(target != _target); // A tensor's target cannot change once its backend tensor exists
+    }
+    else
+    {
+        switch(target)
+        {
+            case Hint::OPENCL:
+                _tensor = initialise_tensor<arm_compute::CLTensor>(_info);
+                break;
+            case Hint::NEON:
+                _tensor = initialise_tensor<arm_compute::Tensor>(_info);
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Invalid Hint"); // DONT_CARE must be resolved before reaching here
+        }
+        _target = target;
+    }
+    return _tensor.get();
+}
+
+void Tensor::allocate() // Allocates backing memory through the target-specific allocator; requires set_target() first
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor.get());
+    switch(_target)
+    {
+        case Hint::OPENCL:
+            tensor_allocate<arm_compute::CLTensor>(*_tensor);
+            break;
+        case Hint::NEON:
+            tensor_allocate<arm_compute::Tensor>(*_tensor);
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Invalid Hint"); // _target is only valid after set_target()
+    }
+}
+
+void Tensor::allocate_and_fill_if_needed() // Allocate, then populate via the accessor if one is attached (used for weights/biases loading)
+{
+    allocate();
+    if(_accessor != nullptr)
+    {
+        call_accessor(); // Return value deliberately ignored: this is a one-shot fill, not a stream step
+    }
+}
+
+Hint Tensor::target() const // The target chosen by set_target(); DONT_CARE until then
+{
+    return _target;
+}
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
new file mode 100644
index 0000000..b71e22c
--- /dev/null
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ActivationLayer.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename ActivationType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info) // Builds and configures a backend activation function
+{
+    auto activation = arm_compute::support::cpp14::make_unique<ActivationType>();
+    activation->configure(
+        dynamic_cast<TensorType *>(input),  // Downcast to the backend tensor type matching the hint
+        dynamic_cast<TensorType *>(output),
+        activation_info);
+
+    return std::move(activation); // Explicit move for the conversion to unique_ptr<IFunction>
+}
+
+template <Hint                          hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info); // Specialized per target below
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info) // OpenCL backend
+{
+    return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, activation_info);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info) // NEON backend
+{
+    return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, Hint::NEON>(input, output, activation_info);
+}
+} // namespace
+
+ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info) // Stores the activation parameters; the function is built later in instantiate_node()
+    : _activation_info(activation_info)
+{
+}
+
+std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output) // Builds the CL or NEON activation function for the chosen target
+{
+    std::unique_ptr<arm_compute::IFunction> func;
+    _hint   = hint;   // Remembered for print_info()
+    _input  = input;  // Non-owning
+    _output = output; // Non-owning
+
+    if(_hint == Hint::OPENCL)
+    {
+        func = instantiate<Hint::OPENCL>(input, output, _activation_info);
+    }
+    else
+    {
+        func = instantiate<Hint::NEON>(input, output, _activation_info); // Any non-OPENCL hint resolves to NEON here
+    }
+    return func;
+}
+
+void ActivationLayer::print_info() // Prints the instantiated backend and tensor/activation parameters; valid only after instantiate_node()
+{
+    if(_hint == Hint::OPENCL)
+    {
+        std::cout << "Instantiating CLActivationLayer";
+    }
+    else
+    {
+        std::cout << "Instantiating NEActivationLayer";
+    }
+
+    std::cout << " Data Type: " << _input->info()->data_type()
+              << " Input shape: " << _input->info()->tensor_shape()
+              << " Output shape: " << _output->info()->tensor_shape()
+              << " Activation function: " << _activation_info.activation()
+              << " a: " << _activation_info.a()
+              << " b: " << _activation_info.b()
+              << std::endl;
+}
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
new file mode 100644
index 0000000..b80bf93
--- /dev/null
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ConvolutionLayer.h"
+
+#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename ConvolutionType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info) // Builds and configures a backend convolution, allocating weights/biases on first use
+{
+    bool weights_are_loaded = weights.tensor() != nullptr; // Backend tensor already exists => previously allocated/filled
+    bool biases_are_loaded  = biases.tensor() != nullptr;
+
+    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
+    conv->configure(
+        dynamic_cast<TensorType *>(input),
+        dynamic_cast<TensorType *>(weights.set_target(hint)), // set_target() lazily creates the backend tensor for this target
+        dynamic_cast<TensorType *>(biases.set_target(hint)),
+        dynamic_cast<TensorType *>(output),
+        conv_info, weights_info);
+    if(!weights_are_loaded) // Allocate after configure() so the function has fixed the tensor layouts
+    {
+        weights.allocate_and_fill_if_needed();
+    }
+    if(!biases_are_loaded)
+    {
+        biases.allocate_and_fill_if_needed();
+    }
+
+    return std::move(conv); // Explicit move for the conversion to unique_ptr<IFunction>
+}
+
+template <Hint                          hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info); // Specialized per target below
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info) // OpenCL backend
+{
+    return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info) // NEON backend
+{
+    return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+}
+} // namespace
+
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output) // Builds the CL or NEON convolution; derives weight/bias shapes from the input if not set
+{
+    if(_weights.tensor() == nullptr) // Default weight shape: kernel WxH x input channels (dim 2) x output feature maps
+    {
+        _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2), _ofm), input->info()->num_channels(), input->info()->data_type(),
+                                     input->info()->fixed_point_position()));
+    }
+    if(_biases.tensor() == nullptr) // Default bias shape: one value per output feature map
+    {
+        _biases.set_info(TensorInfo(TensorShape(_ofm), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
+    }
+
+    std::unique_ptr<arm_compute::IFunction> func;
+    _hint   = hint;   // Remembered for print_info()
+    _input  = input;  // Non-owning
+    _output = output; // Non-owning
+
+    if(_hint == Hint::OPENCL)
+    {
+        func = instantiate<Hint::OPENCL>(input, _weights, _biases, output, _conv_info, _weights_info);
+    }
+    else
+    {
+        func = instantiate<Hint::NEON>(input, _weights, _biases, output, _conv_info, _weights_info); // Any non-OPENCL hint resolves to NEON here
+    }
+
+    return func;
+}
+
+void ConvolutionLayer::print_info() // Prints the instantiated backend and tensor parameters; valid only after instantiate_node()
+{
+    if(_hint == Hint::OPENCL)
+    {
+        std::cout << "Instantiating CLConvolutionLayer";
+    }
+    else
+    {
+        std::cout << "Instantiating NEConvolutionLayer";
+    }
+    std::cout << " Type: " << _input->info()->data_type() << " Input Shape: " << _input->info()->tensor_shape() << " Weights shape: " << _weights.info().tensor_shape() << " Biases Shape: " <<
+              _biases.info().tensor_shape() << " Output Shape: " << _output->info()->tensor_shape() << " PadStrideInfo: " << _conv_info << " WeightsInfo: " << _weights_info << std::endl; // Added missing space before "WeightsInfo:" so fields don't run together
+}
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
new file mode 100644
index 0000000..8d244cb
--- /dev/null
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename FullyConnectedType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output) // Builds and configures a backend fully-connected layer, allocating weights/biases on first use
+{
+    bool weights_are_loaded = weights.tensor() != nullptr; // Backend tensor already exists => previously allocated/filled
+    bool biases_are_loaded  = biases.tensor() != nullptr;
+
+    auto conv = arm_compute::support::cpp14::make_unique<FullyConnectedType>();
+    conv->configure(
+        dynamic_cast<TensorType *>(input),
+        dynamic_cast<TensorType *>(weights.set_target(hint)), // set_target() lazily creates the backend tensor for this target
+        dynamic_cast<TensorType *>(biases.set_target(hint)),
+        dynamic_cast<TensorType *>(output));
+    if(!weights_are_loaded) // Allocate after configure() so the function has fixed the tensor layouts
+    {
+        weights.allocate_and_fill_if_needed();
+    }
+    if(!biases_are_loaded)
+    {
+        biases.allocate_and_fill_if_needed();
+    }
+
+    return std::move(conv); // Explicit move for the conversion to unique_ptr<IFunction>
+}
+
+template <Hint                          hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output); // Specialized per target below
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output) // OpenCL backend
+{
+    return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output) // NEON backend
+{
+    return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output);
+}
+} // namespace
+
+std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output) // Builds the CL or NEON FC layer; derives weight/bias shapes from the flattened input if not set
+{
+    if(_weights.tensor() == nullptr)
+    {
+        unsigned int num_weights    = 1;
+        unsigned int num_dimensions = input->info()->num_dimensions();
+        // Ignore the batch dimension if there is one:
+        if(num_dimensions == 2 || num_dimensions == 4) // NOTE(review): assumes batch is the last dim of a 2D (flat) or 4D (WxHxC) input — confirm for other ranks
+        {
+            num_dimensions--;
+        }
+        for(unsigned int i = 0; i < num_dimensions; i++) // Flatten the non-batch dims into the weight matrix's first dimension
+        {
+            num_weights *= input->info()->dimension(i);
+        }
+        _weights.set_info(TensorInfo(TensorShape(num_weights, _num_neurons), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
+    }
+    if(_biases.tensor() == nullptr) // Default bias shape: one value per neuron
+    {
+        _biases.set_info(TensorInfo(TensorShape(_num_neurons), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
+    }
+
+    arm_compute::auto_init_if_empty(*output->info(), TensorShape(_num_neurons, input->info()->dimension(1)), input->info()->num_channels(), input->info()->data_type(),
+                                    input->info()->fixed_point_position()); // NOTE(review): uses dimension(1) as the batch size — consistent with the 2D flat-input case above
+
+    std::unique_ptr<arm_compute::IFunction> func;
+    _hint   = hint;   // Remembered for print_info()
+    _input  = input;  // Non-owning
+    _output = output; // Non-owning
+
+    if(_hint == Hint::OPENCL)
+    {
+        func = instantiate<Hint::OPENCL>(input, _weights, _biases, output);
+    }
+    else
+    {
+        func = instantiate<Hint::NEON>(input, _weights, _biases, output); // Any non-OPENCL hint resolves to NEON here
+    }
+
+    return func;
+}
+
+void FullyConnectedLayer::print_info() // Prints the instantiated backend and tensor parameters; valid only after instantiate_node()
+{
+    if(_hint == Hint::OPENCL)
+    {
+        std::cout << "Instantiating CLFullyConnectedLayer";
+    }
+    else
+    {
+        std::cout << "Instantiating NEFullyConnectedLayer";
+    }
+    std::cout << " Type: " << _input->info()->data_type() << " Input Shape: " << _input->info()->tensor_shape() << " Weights shape: " << _weights.info().tensor_shape() << " Biases Shape: " <<
+              _biases.info().tensor_shape() << " Output Shape: " << _output->info()->tensor_shape() << std::endl;
+}
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
new file mode 100644
index 0000000..f29332f
--- /dev/null
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/PoolingLayer.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+// Create and configure a backend pooling function of type PoolingType, after
+// down-casting the generic ITensor operands to the backend tensor type
+// TensorType (CLTensor for OpenCL, Tensor for NEON).
+// NOTE(review): dynamic_cast yields nullptr on a type mismatch and the result
+// is passed straight to configure() — this assumes callers always provide
+// tensors matching the selected backend; confirm upstream.
+template <typename PoolingType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+{
+    auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
+    pool->configure(
+        dynamic_cast<TensorType *>(input),
+        dynamic_cast<TensorType *>(output),
+        pool_info);
+
+    return std::move(pool); // explicit move converts unique_ptr<PoolingType> to unique_ptr<IFunction>
+}
+
+// Primary template: declared only; one of the specializations below is always used.
+template <Hint                          hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+{
+    return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, pool_info);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+{
+    return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, Hint::NEON>(input, output, pool_info);
+}
+} // namespace
+
+// Store the pooling configuration; it is used later by instantiate_node().
+PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
+    : _pool_info(pool_info)
+{
+}
+
+std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+{
+    // Remember the execution target and tensor endpoints for later reporting.
+    _hint   = hint;
+    _input  = input;
+    _output = output;
+
+    // Dispatch to the backend selected by the hint (OpenCL, otherwise NEON).
+    return (_hint == Hint::OPENCL) ? instantiate<Hint::OPENCL>(input, output, _pool_info)
+                                   : instantiate<Hint::NEON>(input, output, _pool_info);
+}
+
+void PoolingLayer::print_info()
+{
+    // Report which backend implementation was instantiated for this node.
+    const char *backend = (_hint == Hint::OPENCL) ? "Instantiating CLPoolingLayer" : "Instantiating NEPoolingLayer";
+    std::cout << backend;
+
+    // Dump tensor meta-data and the pooling configuration.
+    std::cout << " Data Type: " << _input->info()->data_type()
+              << " Input shape: " << _input->info()->tensor_shape()
+              << " Output shape: " << _output->info()->tensor_shape()
+              << " Pooling info: " << _pool_info << std::endl;
+}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
new file mode 100644
index 0000000..fee8897
--- /dev/null
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/SoftmaxLayer.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
+#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+// Create and configure a backend softmax function of type SoftmaxType, after
+// down-casting the generic ITensor operands to the backend tensor type
+// TensorType (CLTensor for OpenCL, Tensor for NEON).
+// NOTE(review): dynamic_cast yields nullptr on a type mismatch and the result
+// is passed straight to configure() — this assumes callers always provide
+// tensors matching the selected backend; confirm upstream.
+template <typename SoftmaxType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output)
+{
+    auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
+    softmax->configure(
+        dynamic_cast<TensorType *>(input),
+        dynamic_cast<TensorType *>(output));
+
+    return std::move(softmax); // explicit move converts unique_ptr<SoftmaxType> to unique_ptr<IFunction>
+}
+
+// Primary template: declared only; one of the specializations below is always used.
+template <Hint                          hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output);
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output)
+{
+    return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output)
+{
+    return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, Hint::NEON>(input, output);
+}
+} // namespace
+
+std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+{
+    // Remember the execution target and tensor endpoints for later reporting.
+    _hint   = hint;
+    _input  = input;
+    _output = output;
+
+    // Dispatch to the backend selected by the hint (OpenCL, otherwise NEON).
+    return (_hint == Hint::OPENCL) ? instantiate<Hint::OPENCL>(input, output)
+                                   : instantiate<Hint::NEON>(input, output);
+}
+
+void SoftmaxLayer::print_info()
+{
+    // Report which backend implementation was instantiated for this node.
+    const char *backend = (_hint == Hint::OPENCL) ? "Instantiating CLSoftmaxLayer" : "Instantiating NESoftmaxLayer";
+    std::cout << backend;
+    // Dump the tensor meta-data (data type and input/output shapes).
+    std::cout << " Data Type: " << _input->info()->data_type()
+              << " Input shape: " << _input->info()->tensor_shape()
+              << " Output shape: " << _output->info()->tensor_shape()
+              << std::endl;
+}