arm_compute v17.12
diff --git a/documentation/neon__cnn_8cpp_source.xhtml b/documentation/neon__cnn_8cpp_source.xhtml
index ef6676f..1278b95 100644
--- a/documentation/neon__cnn_8cpp_source.xhtml
+++ b/documentation/neon__cnn_8cpp_source.xhtml
@@ -4,7 +4,7 @@
 <head>
 <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
 <meta http-equiv="X-UA-Compatible" content="IE=9"/>
-<meta name="generator" content="Doxygen 1.8.6"/>
+<meta name="generator" content="Doxygen 1.8.11"/>
 <meta name="robots" content="NOINDEX, NOFOLLOW" /> <!-- Prevent indexing by search engines -->
 <title>Compute Library: examples/neon_cnn.cpp Source File</title>
 <link href="tabs.css" rel="stylesheet" type="text/css"/>
@@ -12,22 +12,24 @@
 <script type="text/javascript" src="dynsections.js"></script>
 <link href="navtree.css" rel="stylesheet" type="text/css"/>
 <script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
 <script type="text/javascript" src="navtree.js"></script>
 <script type="text/javascript">
   $(document).ready(initResizable);
   $(window).load(resizeHeight);
 </script>
 <link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
 <script type="text/javascript" src="search/search.js"></script>
 <script type="text/javascript">
-  $(document).ready(function() { searchBox.OnSelectItem(0); });
+  $(document).ready(function() { init_search(); });
 </script>
 <script type="text/x-mathjax-config">
   MathJax.Hub.Config({
     extensions: ["tex2jax.js"],
     jax: ["input/TeX","output/HTML-CSS"],
 });
-</script><script src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
+</script><script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
 <link href="doxygen.css" rel="stylesheet" type="text/css" />
 </head>
 <body>
@@ -38,7 +40,7 @@
  <tr style="height: 56px;">
   <td style="padding-left: 0.5em;">
    <div id="projectname">Compute Library
-   &#160;<span id="projectnumber">17.10</span>
+   &#160;<span id="projectnumber">17.12</span>
    </div>
   </td>
  </tr>
@@ -46,7 +48,7 @@
 </table>
 </div>
 <!-- end header part -->
-<!-- Generated by Doxygen 1.8.6 -->
+<!-- Generated by Doxygen 1.8.11 -->
 <script type="text/javascript">
 var searchBox = new SearchBox("searchBox", "search",false,'Search');
 </script>
@@ -101,7 +103,7 @@
      onmouseover="return searchBox.OnSearchSelectShow()"
      onmouseout="return searchBox.OnSearchSelectHide()"
      onkeydown="return searchBox.OnSearchSelectKey(event)">
-<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Data Structures</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Namespaces</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark">&#160;</span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark">&#160;</span>Typedefs</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(7)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(8)"><span class="SelectionMark">&#160;</span>Enumerator</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(9)"><span class="SelectionMark">&#160;</span>Friends</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(10)"><span class="SelectionMark">&#160;</span>Macros</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(11)"><span class="SelectionMark">&#160;</span>Pages</a></div>
+</div>
 
 <!-- iframe showing the search results (closed by default) -->
 <div id="MSearchResultsWindow">
@@ -115,317 +117,30 @@
 <div class="title">neon_cnn.cpp</div>  </div>
 </div><!--header-->
 <div class="contents">
-<a href="neon__cnn_8cpp.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno">    1</span>&#160;<span class="comment">/*</span></div>
-<div class="line"><a name="l00002"></a><span class="lineno">    2</span>&#160;<span class="comment"> * Copyright (c) 2016, 2017 ARM Limited.</span></div>
-<div class="line"><a name="l00003"></a><span class="lineno">    3</span>&#160;<span class="comment"> *</span></div>
-<div class="line"><a name="l00004"></a><span class="lineno">    4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div>
-<div class="line"><a name="l00005"></a><span class="lineno">    5</span>&#160;<span class="comment"> *</span></div>
-<div class="line"><a name="l00006"></a><span class="lineno">    6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div>
-<div class="line"><a name="l00007"></a><span class="lineno">    7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div>
-<div class="line"><a name="l00008"></a><span class="lineno">    8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div>
-<div class="line"><a name="l00009"></a><span class="lineno">    9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div>
-<div class="line"><a name="l00010"></a><span class="lineno">   10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div>
-<div class="line"><a name="l00011"></a><span class="lineno">   11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div>
-<div class="line"><a name="l00012"></a><span class="lineno">   12</span>&#160;<span class="comment"> *</span></div>
-<div class="line"><a name="l00013"></a><span class="lineno">   13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div>
-<div class="line"><a name="l00014"></a><span class="lineno">   14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div>
-<div class="line"><a name="l00015"></a><span class="lineno">   15</span>&#160;<span class="comment"> *</span></div>
-<div class="line"><a name="l00016"></a><span class="lineno">   16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div>
-<div class="line"><a name="l00017"></a><span class="lineno">   17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div>
-<div class="line"><a name="l00018"></a><span class="lineno">   18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE</span></div>
-<div class="line"><a name="l00019"></a><span class="lineno">   19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div>
-<div class="line"><a name="l00020"></a><span class="lineno">   20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div>
-<div class="line"><a name="l00021"></a><span class="lineno">   21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div>
-<div class="line"><a name="l00022"></a><span class="lineno">   22</span>&#160;<span class="comment"> * SOFTWARE.</span></div>
-<div class="line"><a name="l00023"></a><span class="lineno">   23</span>&#160;<span class="comment"> */</span></div>
-<div class="line"><a name="l00024"></a><span class="lineno">   24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_functions_8h.xhtml">arm_compute/runtime/NEON/NEFunctions.h</a>&quot;</span></div>
-<div class="line"><a name="l00025"></a><span class="lineno">   25</span>&#160;</div>
-<div class="line"><a name="l00026"></a><span class="lineno">   26</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_types_8h.xhtml">arm_compute/core/Types.h</a>&quot;</span></div>
-<div class="line"><a name="l00027"></a><span class="lineno">   27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_allocator_8h.xhtml">arm_compute/runtime/Allocator.h</a>&quot;</span></div>
-<div class="line"><a name="l00028"></a><span class="lineno">   28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_blob_lifetime_manager_8h.xhtml">arm_compute/runtime/BlobLifetimeManager.h</a>&quot;</span></div>
-<div class="line"><a name="l00029"></a><span class="lineno">   29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_memory_manager_on_demand_8h.xhtml">arm_compute/runtime/MemoryManagerOnDemand.h</a>&quot;</span></div>
-<div class="line"><a name="l00030"></a><span class="lineno">   30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_pool_manager_8h.xhtml">arm_compute/runtime/PoolManager.h</a>&quot;</span></div>
-<div class="line"><a name="l00031"></a><span class="lineno">   31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="utils_2_utils_8h.xhtml">utils/Utils.h</a>&quot;</span></div>
-<div class="line"><a name="l00032"></a><span class="lineno">   32</span>&#160;</div>
-<div class="line"><a name="l00033"></a><span class="lineno">   33</span>&#160;<span class="keyword">using namespace </span>arm_compute;</div>
-<div class="line"><a name="l00034"></a><span class="lineno">   34</span>&#160;<span class="keyword">using namespace </span>utils;</div>
-<div class="line"><a name="l00035"></a><span class="lineno">   35</span>&#160;</div>
-<div class="line"><a name="l00036"></a><span class="lineno"><a class="line" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">   36</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">main_cnn</a>(<span class="keywordtype">int</span> argc, <span class="keyword">const</span> <span class="keywordtype">char</span> **argv)</div>
-<div class="line"><a name="l00037"></a><span class="lineno">   37</span>&#160;{</div>
-<div class="line"><a name="l00038"></a><span class="lineno">   38</span>&#160;    <a class="code" href="_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a>(argc);</div>
-<div class="line"><a name="l00039"></a><span class="lineno">   39</span>&#160;    <a class="code" href="_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a>(argv);</div>
-<div class="line"><a name="l00040"></a><span class="lineno">   40</span>&#160;</div>
-<div class="line"><a name="l00041"></a><span class="lineno">   41</span>&#160;    <span class="comment">// Create NEON allocator</span></div>
-<div class="line"><a name="l00042"></a><span class="lineno">   42</span>&#160;    <a class="code" href="classarm__compute_1_1_allocator.xhtml">Allocator</a> allocator;</div>
-<div class="line"><a name="l00043"></a><span class="lineno">   43</span>&#160;</div>
-<div class="line"><a name="l00044"></a><span class="lineno">   44</span>&#160;    <span class="comment">// Create memory manager components</span></div>
-<div class="line"><a name="l00045"></a><span class="lineno">   45</span>&#160;    <span class="comment">// We need 2 memory managers: 1 for handling the tensors within the functions (mm_layers) and 1 for handling the input and output tensors of the functions (mm_transitions))</span></div>
-<div class="line"><a name="l00046"></a><span class="lineno">   46</span>&#160;    <span class="keyword">auto</span> lifetime_mgr0  = std::make_shared&lt;BlobLifetimeManager&gt;();                           <span class="comment">// Create lifetime manager</span></div>
-<div class="line"><a name="l00047"></a><span class="lineno">   47</span>&#160;    <span class="keyword">auto</span> lifetime_mgr1  = std::make_shared&lt;BlobLifetimeManager&gt;();                           <span class="comment">// Create lifetime manager</span></div>
-<div class="line"><a name="l00048"></a><span class="lineno">   48</span>&#160;    <span class="keyword">auto</span> pool_mgr0      = std::make_shared&lt;PoolManager&gt;();                                   <span class="comment">// Create pool manager</span></div>
-<div class="line"><a name="l00049"></a><span class="lineno">   49</span>&#160;    <span class="keyword">auto</span> pool_mgr1      = std::make_shared&lt;PoolManager&gt;();                                   <span class="comment">// Create pool manager</span></div>
-<div class="line"><a name="l00050"></a><span class="lineno">   50</span>&#160;    <span class="keyword">auto</span> mm_layers      = std::make_shared&lt;MemoryManagerOnDemand&gt;(lifetime_mgr0, pool_mgr0); <span class="comment">// Create the memory manager</span></div>
-<div class="line"><a name="l00051"></a><span class="lineno">   51</span>&#160;    <span class="keyword">auto</span> mm_transitions = std::make_shared&lt;MemoryManagerOnDemand&gt;(lifetime_mgr1, pool_mgr1); <span class="comment">// Create the memory manager</span></div>
-<div class="line"><a name="l00052"></a><span class="lineno">   52</span>&#160;</div>
-<div class="line"><a name="l00053"></a><span class="lineno">   53</span>&#160;    <span class="comment">// The src tensor should contain the input image</span></div>
-<div class="line"><a name="l00054"></a><span class="lineno">   54</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> src;</div>
-<div class="line"><a name="l00055"></a><span class="lineno">   55</span>&#160;</div>
-<div class="line"><a name="l00056"></a><span class="lineno">   56</span>&#160;    <span class="comment">// The weights and biases tensors should be initialized with the values inferred with the training</span></div>
-<div class="line"><a name="l00057"></a><span class="lineno">   57</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights0;</div>
-<div class="line"><a name="l00058"></a><span class="lineno">   58</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights1;</div>
-<div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights2;</div>
-<div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases0;</div>
-<div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases1;</div>
-<div class="line"><a name="l00062"></a><span class="lineno">   62</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases2;</div>
-<div class="line"><a name="l00063"></a><span class="lineno">   63</span>&#160;</div>
-<div class="line"><a name="l00064"></a><span class="lineno">   64</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_conv0;</div>
-<div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_conv1;</div>
-<div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act0;</div>
-<div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act1;</div>
-<div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act2;</div>
-<div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_pool0;</div>
-<div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_pool1;</div>
-<div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_fc0;</div>
-<div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_softmax;</div>
-<div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;</div>
-<div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160;    <span class="comment">// Create layers and set memory manager where allowed to manage internal memory requirements</span></div>
-<div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml">NEConvolutionLayer</a>    conv0(mm_layers);</div>
-<div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml">NEConvolutionLayer</a>    conv1(mm_layers);</div>
-<div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml">NEPoolingLayer</a>        pool0;</div>
-<div class="line"><a name="l00078"></a><span class="lineno">   78</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml">NEPoolingLayer</a>        pool1;</div>
-<div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> fc0(mm_layers);</div>
-<div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act0;</div>
-<div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act1;</div>
-<div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act2;</div>
-<div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml">NESoftmaxLayer</a>        softmax(mm_layers);</div>
-<div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;</div>
-<div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;    <span class="comment">/* [Initialize tensors] */</span></div>
-<div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160;</div>
-<div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160;    <span class="comment">// Initialize src tensor</span></div>
-<div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> width_src_image  = 32;</div>
-<div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> height_src_image = 32;</div>
-<div class="line"><a name="l00090"></a><span class="lineno">   90</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ifm_src_img      = 1;</div>
-<div class="line"><a name="l00091"></a><span class="lineno">   91</span>&#160;</div>
-<div class="line"><a name="l00092"></a><span class="lineno">   92</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> src_shape(width_src_image, height_src_image, ifm_src_img);</div>
-<div class="line"><a name="l00093"></a><span class="lineno">   93</span>&#160;    src.allocator()-&gt;init(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(src_shape, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00094"></a><span class="lineno">   94</span>&#160;</div>
-<div class="line"><a name="l00095"></a><span class="lineno">   95</span>&#160;    <span class="comment">// Initialize tensors of conv0</span></div>
-<div class="line"><a name="l00096"></a><span class="lineno">   96</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_x_conv0 = 5;</div>
-<div class="line"><a name="l00097"></a><span class="lineno">   97</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_y_conv0 = 5;</div>
-<div class="line"><a name="l00098"></a><span class="lineno">   98</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ofm_conv0      = 8;</div>
-<div class="line"><a name="l00099"></a><span class="lineno">   99</span>&#160;</div>
-<div class="line"><a name="l00100"></a><span class="lineno">  100</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_conv0(kernel_x_conv0, kernel_y_conv0, src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), ofm_conv0);</div>
-<div class="line"><a name="l00101"></a><span class="lineno">  101</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_conv0(weights_shape_conv0[3]);</div>
-<div class="line"><a name="l00102"></a><span class="lineno">  102</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_conv0(src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>(), src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>(), weights_shape_conv0[3]);</div>
-<div class="line"><a name="l00103"></a><span class="lineno">  103</span>&#160;</div>
-<div class="line"><a name="l00104"></a><span class="lineno">  104</span>&#160;    weights0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00105"></a><span class="lineno">  105</span>&#160;    biases0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00106"></a><span class="lineno">  106</span>&#160;    out_conv0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00107"></a><span class="lineno">  107</span>&#160;</div>
-<div class="line"><a name="l00108"></a><span class="lineno">  108</span>&#160;    <span class="comment">// Initialize tensor of act0</span></div>
-<div class="line"><a name="l00109"></a><span class="lineno">  109</span>&#160;    out_act0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00110"></a><span class="lineno">  110</span>&#160;</div>
-<div class="line"><a name="l00111"></a><span class="lineno">  111</span>&#160;    <span class="comment">// Initialize tensor of pool0</span></div>
-<div class="line"><a name="l00112"></a><span class="lineno">  112</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_pool0 = out_shape_conv0;</div>
-<div class="line"><a name="l00113"></a><span class="lineno">  113</span>&#160;    out_shape_pool0.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(0, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() / 2);</div>
-<div class="line"><a name="l00114"></a><span class="lineno">  114</span>&#160;    out_shape_pool0.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(1, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() / 2);</div>
-<div class="line"><a name="l00115"></a><span class="lineno">  115</span>&#160;    out_pool0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_pool0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00116"></a><span class="lineno">  116</span>&#160;</div>
-<div class="line"><a name="l00117"></a><span class="lineno">  117</span>&#160;    <span class="comment">// Initialize tensors of conv1</span></div>
-<div class="line"><a name="l00118"></a><span class="lineno">  118</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_x_conv1 = 3;</div>
-<div class="line"><a name="l00119"></a><span class="lineno">  119</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_y_conv1 = 3;</div>
-<div class="line"><a name="l00120"></a><span class="lineno">  120</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ofm_conv1      = 16;</div>
-<div class="line"><a name="l00121"></a><span class="lineno">  121</span>&#160;</div>
-<div class="line"><a name="l00122"></a><span class="lineno">  122</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_conv1(kernel_x_conv1, kernel_y_conv1, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), ofm_conv1);</div>
-<div class="line"><a name="l00123"></a><span class="lineno">  123</span>&#160;</div>
-<div class="line"><a name="l00124"></a><span class="lineno">  124</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_conv1(weights_shape_conv1[3]);</div>
-<div class="line"><a name="l00125"></a><span class="lineno">  125</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_conv1(out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>(), out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>(), weights_shape_conv1[3]);</div>
-<div class="line"><a name="l00126"></a><span class="lineno">  126</span>&#160;</div>
-<div class="line"><a name="l00127"></a><span class="lineno">  127</span>&#160;    weights1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00128"></a><span class="lineno">  128</span>&#160;    biases1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00129"></a><span class="lineno">  129</span>&#160;    out_conv1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00130"></a><span class="lineno">  130</span>&#160;</div>
-<div class="line"><a name="l00131"></a><span class="lineno">  131</span>&#160;    <span class="comment">// Initialize tensor of act1</span></div>
-<div class="line"><a name="l00132"></a><span class="lineno">  132</span>&#160;    out_act1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00133"></a><span class="lineno">  133</span>&#160;</div>
-<div class="line"><a name="l00134"></a><span class="lineno">  134</span>&#160;    <span class="comment">// Initialize tensor of pool1</span></div>
-<div class="line"><a name="l00135"></a><span class="lineno">  135</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_pool1 = out_shape_conv1;</div>
-<div class="line"><a name="l00136"></a><span class="lineno">  136</span>&#160;    out_shape_pool1.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(0, out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() / 2);</div>
-<div class="line"><a name="l00137"></a><span class="lineno">  137</span>&#160;    out_shape_pool1.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(1, out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() / 2);</div>
-<div class="line"><a name="l00138"></a><span class="lineno">  138</span>&#160;    out_pool1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_pool1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00139"></a><span class="lineno">  139</span>&#160;</div>
-<div class="line"><a name="l00140"></a><span class="lineno">  140</span>&#160;    <span class="comment">// Initialize tensor of fc0</span></div>
-<div class="line"><a name="l00141"></a><span class="lineno">  141</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> num_labels = 128;</div>
-<div class="line"><a name="l00142"></a><span class="lineno">  142</span>&#160;</div>
-<div class="line"><a name="l00143"></a><span class="lineno">  143</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_fc0(out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() * out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() * out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), num_labels);</div>
-<div class="line"><a name="l00144"></a><span class="lineno">  144</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_fc0(num_labels);</div>
-<div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_fc0(num_labels);</div>
-<div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;</div>
-<div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160;    weights2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;    biases2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;    out_fc0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160;</div>
-<div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;    <span class="comment">// Initialize tensor of act2</span></div>
-<div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;    out_act2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160;</div>
-<div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    <span class="comment">// Initialize tensor of softmax</span></div>
-<div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_softmax(out_shape_fc0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>());</div>
-<div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160;    out_softmax.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_softmax, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div>
-<div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;</div>
-<div class="line"><a name="l00158"></a><span class="lineno">  158</span>&#160;    <span class="comment">/* -----------------------End: [Initialize tensors] */</span></div>
-<div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160;</div>
-<div class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;    <span class="comment">/* [Configure functions] */</span></div>
-<div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;</div>
-<div class="line"><a name="l00162"></a><span class="lineno">  162</span>&#160;    <span class="comment">// in:32x32x1: 5x5 convolution, 8 output features maps (OFM)</span></div>
-<div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160;    conv0.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#aee13eaa771696a8257ededf5bf921cbb">configure</a>(&amp;src, &amp;weights0, &amp;biases0, &amp;out_conv0, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(1 <span class="comment">/* stride_x */</span>, 1 <span class="comment">/* stride_y */</span>, 2 <span class="comment">/* pad_x */</span>, 2 <span class="comment">/* pad_y */</span>));</div>
-<div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;</div>
-<div class="line"><a name="l00165"></a><span class="lineno">  165</span>&#160;    <span class="comment">// in:32x32x8, out:32x32x8, Activation function: relu</span></div>
-<div class="line"><a name="l00166"></a><span class="lineno">  166</span>&#160;    act0.<a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_conv0, &amp;out_act0, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div>
-<div class="line"><a name="l00167"></a><span class="lineno">  167</span>&#160;</div>
-<div class="line"><a name="l00168"></a><span class="lineno">  168</span>&#160;    <span class="comment">// in:32x32x8, out:16x16x8 (2x2 pooling), Pool type function: Max</span></div>
-<div class="line"><a name="l00169"></a><span class="lineno">  169</span>&#160;    pool0.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#a6fa6e4b65796fd0bf43da9b4d617d568">configure</a>(&amp;out_act0, &amp;out_pool0, <a class="code" href="classarm__compute_1_1_pooling_layer_info.xhtml">PoolingLayerInfo</a>(<a class="code" href="namespacearm__compute.xhtml#adf2ced65e536375a1c96425d9fced858a26a4b44a837bf97b972628509912b4a5">PoolingType::MAX</a>, 2, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(2 <span class="comment">/* stride_x */</span>, 2 <span class="comment">/* stride_y */</span>)));</div>
-<div class="line"><a name="l00170"></a><span class="lineno">  170</span>&#160;</div>
-<div class="line"><a name="l00171"></a><span class="lineno">  171</span>&#160;    <span class="comment">// in:16x16x8: 3x3 convolution, 16 output features maps (OFM)</span></div>
-<div class="line"><a name="l00172"></a><span class="lineno">  172</span>&#160;    conv1.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#aee13eaa771696a8257ededf5bf921cbb">configure</a>(&amp;out_pool0, &amp;weights1, &amp;biases1, &amp;out_conv1, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(1 <span class="comment">/* stride_x */</span>, 1 <span class="comment">/* stride_y */</span>, 1 <span class="comment">/* pad_x */</span>, 1 <span class="comment">/* pad_y */</span>));</div>
-<div class="line"><a name="l00173"></a><span class="lineno">  173</span>&#160;</div>
-<div class="line"><a name="l00174"></a><span class="lineno">  174</span>&#160;    <span class="comment">// in:16x16x16, out:16x16x16, Activation function: relu</span></div>
-<div class="line"><a name="l00175"></a><span class="lineno">  175</span>&#160;    act1.<a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_conv1, &amp;out_act1, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div>
-<div class="line"><a name="l00176"></a><span class="lineno">  176</span>&#160;</div>
-<div class="line"><a name="l00177"></a><span class="lineno">  177</span>&#160;    <span class="comment">// in:16x16x16, out:8x8x16 (2x2 pooling), Pool type function: Average</span></div>
-<div class="line"><a name="l00178"></a><span class="lineno">  178</span>&#160;    pool1.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#a6fa6e4b65796fd0bf43da9b4d617d568">configure</a>(&amp;out_act1, &amp;out_pool1, <a class="code" href="classarm__compute_1_1_pooling_layer_info.xhtml">PoolingLayerInfo</a>(<a class="code" href="namespacearm__compute.xhtml#a9172da722f0a434e5cc07c0a3c115d93afcefd647d6a866603c627b11347c707a">PoolingType::AVG</a>, 2, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(2 <span class="comment">/* stride_x */</span>, 2 <span class="comment">/* stride_y */</span>)));</div>
-<div class="line"><a name="l00179"></a><span class="lineno">  179</span>&#160;</div>
-<div class="line"><a name="l00180"></a><span class="lineno">  180</span>&#160;    <span class="comment">// in:8x8x16, out:128</span></div>
-<div class="line"><a name="l00181"></a><span class="lineno">  181</span>&#160;    fc0.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ae184041d029cd0dded821875db8a0929">configure</a>(&amp;out_pool1, &amp;weights2, &amp;biases2, &amp;out_fc0);</div>
-<div class="line"><a name="l00182"></a><span class="lineno">  182</span>&#160;</div>
-<div class="line"><a name="l00183"></a><span class="lineno">  183</span>&#160;    <span class="comment">// in:128, out:128, Activation function: relu</span></div>
-<div class="line"><a name="l00184"></a><span class="lineno">  184</span>&#160;    act2.<a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_fc0, &amp;out_act2, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div>
-<div class="line"><a name="l00185"></a><span class="lineno">  185</span>&#160;</div>
-<div class="line"><a name="l00186"></a><span class="lineno">  186</span>&#160;    <span class="comment">// in:128, out:128</span></div>
-<div class="line"><a name="l00187"></a><span class="lineno">  187</span>&#160;    softmax.<a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml#a9daf8026e68559806afe7d0aa12693d6">configure</a>(&amp;out_act2, &amp;out_softmax);</div>
-<div class="line"><a name="l00188"></a><span class="lineno">  188</span>&#160;</div>
-<div class="line"><a name="l00189"></a><span class="lineno">  189</span>&#160;    <span class="comment">/* -----------------------End: [Configure functions] */</span></div>
-<div class="line"><a name="l00190"></a><span class="lineno">  190</span>&#160;</div>
-<div class="line"><a name="l00191"></a><span class="lineno">  191</span>&#160;    <span class="comment">/*[ Add tensors to memory manager ]*/</span></div>
-<div class="line"><a name="l00192"></a><span class="lineno">  192</span>&#160;</div>
-<div class="line"><a name="l00193"></a><span class="lineno">  193</span>&#160;    <span class="comment">// We need 2 memory groups for handling the input and output</span></div>
-<div class="line"><a name="l00194"></a><span class="lineno">  194</span>&#160;    <span class="comment">// We call explicitly allocate after manage() in order to avoid overlapping lifetimes</span></div>
-<div class="line"><a name="l00195"></a><span class="lineno">  195</span>&#160;    <a class="code" href="classarm__compute_1_1_memory_group_base.xhtml">MemoryGroup</a> memory_group0(mm_transitions);</div>
-<div class="line"><a name="l00196"></a><span class="lineno">  196</span>&#160;    <a class="code" href="classarm__compute_1_1_memory_group_base.xhtml">MemoryGroup</a> memory_group1(mm_transitions);</div>
-<div class="line"><a name="l00197"></a><span class="lineno">  197</span>&#160;</div>
-<div class="line"><a name="l00198"></a><span class="lineno">  198</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_conv0);</div>
-<div class="line"><a name="l00199"></a><span class="lineno">  199</span>&#160;    out_conv0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00200"></a><span class="lineno">  200</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act0);</div>
-<div class="line"><a name="l00201"></a><span class="lineno">  201</span>&#160;    out_act0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00202"></a><span class="lineno">  202</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_pool0);</div>
-<div class="line"><a name="l00203"></a><span class="lineno">  203</span>&#160;    out_pool0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00204"></a><span class="lineno">  204</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_conv1);</div>
-<div class="line"><a name="l00205"></a><span class="lineno">  205</span>&#160;    out_conv1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00206"></a><span class="lineno">  206</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act1);</div>
-<div class="line"><a name="l00207"></a><span class="lineno">  207</span>&#160;    out_act1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00208"></a><span class="lineno">  208</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_pool1);</div>
-<div class="line"><a name="l00209"></a><span class="lineno">  209</span>&#160;    out_pool1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00210"></a><span class="lineno">  210</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_fc0);</div>
-<div class="line"><a name="l00211"></a><span class="lineno">  211</span>&#160;    out_fc0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00212"></a><span class="lineno">  212</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act2);</div>
-<div class="line"><a name="l00213"></a><span class="lineno">  213</span>&#160;    out_act2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00214"></a><span class="lineno">  214</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_softmax);</div>
-<div class="line"><a name="l00215"></a><span class="lineno">  215</span>&#160;    out_softmax.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00216"></a><span class="lineno">  216</span>&#160;</div>
-<div class="line"><a name="l00217"></a><span class="lineno">  217</span>&#160;    <span class="comment">/* -----------------------End: [ Add tensors to memory manager ] */</span></div>
-<div class="line"><a name="l00218"></a><span class="lineno">  218</span>&#160;</div>
-<div class="line"><a name="l00219"></a><span class="lineno">  219</span>&#160;    <span class="comment">/* [Allocate tensors] */</span></div>
-<div class="line"><a name="l00220"></a><span class="lineno">  220</span>&#160;</div>
-<div class="line"><a name="l00221"></a><span class="lineno">  221</span>&#160;    <span class="comment">// Now that the padding requirements are known we can allocate all tensors</span></div>
-<div class="line"><a name="l00222"></a><span class="lineno">  222</span>&#160;    src.allocator()-&gt;allocate();</div>
-<div class="line"><a name="l00223"></a><span class="lineno">  223</span>&#160;    weights0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00224"></a><span class="lineno">  224</span>&#160;    weights1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00225"></a><span class="lineno">  225</span>&#160;    weights2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00226"></a><span class="lineno">  226</span>&#160;    biases0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00227"></a><span class="lineno">  227</span>&#160;    biases1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00228"></a><span class="lineno">  228</span>&#160;    biases2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div>
-<div class="line"><a name="l00229"></a><span class="lineno">  229</span>&#160;</div>
-<div class="line"><a name="l00230"></a><span class="lineno">  230</span>&#160;    <span class="comment">/* -----------------------End: [Allocate tensors] */</span></div>
-<div class="line"><a name="l00231"></a><span class="lineno">  231</span>&#160;</div>
-<div class="line"><a name="l00232"></a><span class="lineno">  232</span>&#160;    <span class="comment">// Finalize layers memory manager</span></div>
-<div class="line"><a name="l00233"></a><span class="lineno">  233</span>&#160;</div>
-<div class="line"><a name="l00234"></a><span class="lineno">  234</span>&#160;    <span class="comment">// Set allocator that the memory manager will use</span></div>
-<div class="line"><a name="l00235"></a><span class="lineno">  235</span>&#160;    mm_layers-&gt;set_allocator(&amp;allocator);</div>
-<div class="line"><a name="l00236"></a><span class="lineno">  236</span>&#160;</div>
-<div class="line"><a name="l00237"></a><span class="lineno">  237</span>&#160;    <span class="comment">// Number of pools that the manager will create. This specifies how many layers you want to run in parallel</span></div>
-<div class="line"><a name="l00238"></a><span class="lineno">  238</span>&#160;    mm_layers-&gt;set_num_pools(1);</div>
-<div class="line"><a name="l00239"></a><span class="lineno">  239</span>&#160;</div>
-<div class="line"><a name="l00240"></a><span class="lineno">  240</span>&#160;    <span class="comment">// Finalize the manager. (Validity checks, memory allocations etc)</span></div>
-<div class="line"><a name="l00241"></a><span class="lineno">  241</span>&#160;    mm_layers-&gt;finalize();</div>
-<div class="line"><a name="l00242"></a><span class="lineno">  242</span>&#160;</div>
-<div class="line"><a name="l00243"></a><span class="lineno">  243</span>&#160;    <span class="comment">// Finalize transitions memory manager</span></div>
-<div class="line"><a name="l00244"></a><span class="lineno">  244</span>&#160;</div>
-<div class="line"><a name="l00245"></a><span class="lineno">  245</span>&#160;    <span class="comment">// Set allocator that the memory manager will use</span></div>
-<div class="line"><a name="l00246"></a><span class="lineno">  246</span>&#160;    mm_transitions-&gt;set_allocator(&amp;allocator);</div>
-<div class="line"><a name="l00247"></a><span class="lineno">  247</span>&#160;</div>
-<div class="line"><a name="l00248"></a><span class="lineno">  248</span>&#160;    <span class="comment">// Number of pools that the manager will create. This specifies how many models we can run in parallel.</span></div>
-<div class="line"><a name="l00249"></a><span class="lineno">  249</span>&#160;    <span class="comment">// Setting to 2 as we need one for the input and one for the output at any given time</span></div>
-<div class="line"><a name="l00250"></a><span class="lineno">  250</span>&#160;    mm_transitions-&gt;set_num_pools(2);</div>
-<div class="line"><a name="l00251"></a><span class="lineno">  251</span>&#160;</div>
-<div class="line"><a name="l00252"></a><span class="lineno">  252</span>&#160;    <span class="comment">// Finalize the manager. (Validity checks, memory allocations etc)</span></div>
-<div class="line"><a name="l00253"></a><span class="lineno">  253</span>&#160;    mm_transitions-&gt;finalize();</div>
-<div class="line"><a name="l00254"></a><span class="lineno">  254</span>&#160;</div>
-<div class="line"><a name="l00255"></a><span class="lineno">  255</span>&#160;    <span class="comment">/* [Initialize weights and biases tensors] */</span></div>
-<div class="line"><a name="l00256"></a><span class="lineno">  256</span>&#160;</div>
-<div class="line"><a name="l00257"></a><span class="lineno">  257</span>&#160;    <span class="comment">// Once the tensors have been allocated, the src, weights and biases tensors can be initialized</span></div>
-<div class="line"><a name="l00258"></a><span class="lineno">  258</span>&#160;    <span class="comment">// ...</span></div>
-<div class="line"><a name="l00259"></a><span class="lineno">  259</span>&#160;</div>
-<div class="line"><a name="l00260"></a><span class="lineno">  260</span>&#160;    <span class="comment">/* -----------------------[Initialize weights and biases tensors] */</span></div>
-<div class="line"><a name="l00261"></a><span class="lineno">  261</span>&#160;</div>
-<div class="line"><a name="l00262"></a><span class="lineno">  262</span>&#160;    <span class="comment">/* [Execute the functions] */</span></div>
-<div class="line"><a name="l00263"></a><span class="lineno">  263</span>&#160;</div>
-<div class="line"><a name="l00264"></a><span class="lineno">  264</span>&#160;    <span class="comment">// Acquire memory for the memory groups</span></div>
-<div class="line"><a name="l00265"></a><span class="lineno">  265</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">acquire</a>();</div>
-<div class="line"><a name="l00266"></a><span class="lineno">  266</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">acquire</a>();</div>
-<div class="line"><a name="l00267"></a><span class="lineno">  267</span>&#160;</div>
-<div class="line"><a name="l00268"></a><span class="lineno">  268</span>&#160;    conv0.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00269"></a><span class="lineno">  269</span>&#160;    act0.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div>
-<div class="line"><a name="l00270"></a><span class="lineno">  270</span>&#160;    pool0.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00271"></a><span class="lineno">  271</span>&#160;    conv1.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00272"></a><span class="lineno">  272</span>&#160;    act1.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div>
-<div class="line"><a name="l00273"></a><span class="lineno">  273</span>&#160;    pool1.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00274"></a><span class="lineno">  274</span>&#160;    fc0.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00275"></a><span class="lineno">  275</span>&#160;    act2.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div>
-<div class="line"><a name="l00276"></a><span class="lineno">  276</span>&#160;    softmax.<a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div>
-<div class="line"><a name="l00277"></a><span class="lineno">  277</span>&#160;</div>
-<div class="line"><a name="l00278"></a><span class="lineno">  278</span>&#160;    <span class="comment">// Release memory</span></div>
-<div class="line"><a name="l00279"></a><span class="lineno">  279</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">release</a>();</div>
-<div class="line"><a name="l00280"></a><span class="lineno">  280</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">release</a>();</div>
-<div class="line"><a name="l00281"></a><span class="lineno">  281</span>&#160;</div>
-<div class="line"><a name="l00282"></a><span class="lineno">  282</span>&#160;    <span class="comment">/* -----------------------End: [Execute the functions] */</span></div>
-<div class="line"><a name="l00283"></a><span class="lineno">  283</span>&#160;}</div>
-<div class="line"><a name="l00284"></a><span class="lineno">  284</span>&#160;</div>
-<div class="line"><a name="l00294"></a><span class="lineno"><a class="line" href="neon__cnn_8cpp.xhtml#a217dbf8b442f20279ea00b898af96f52">  294</a></span>&#160;<span class="keywordtype">int</span> <a class="code" href="neon__cnn_8cpp.xhtml#a217dbf8b442f20279ea00b898af96f52">main</a>(<span class="keywordtype">int</span> argc, <span class="keyword">const</span> <span class="keywordtype">char</span> **argv)</div>
-<div class="line"><a name="l00295"></a><span class="lineno">  295</span>&#160;{</div>
-<div class="line"><a name="l00296"></a><span class="lineno">  296</span>&#160;    <span class="keywordflow">return</span> <a class="code" href="namespacearm__compute_1_1utils.xhtml#a4c9395db2c8b8d0c336656a7b58fca3e">utils::run_example</a>(argc, argv, <a class="code" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">main_cnn</a>);</div>
-<div class="line"><a name="l00297"></a><span class="lineno">  297</span>&#160;}</div>
-<div class="ttc" id="classarm__compute_1_1_dimensions_xhtml_a336121cb63ed79fa0a072eed03d694ac"><div class="ttname"><a href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">arm_compute::Dimensions::z</a></div><div class="ttdeci">T z() const </div><div class="ttdoc">Alias to access the size of the third dimension. </div><div class="ttdef"><b>Definition:</b> <a href="_dimensions_8h_source.xhtml#l00091">Dimensions.h:91</a></div></div>
+<a href="neon__cnn_8cpp.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno">    1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno">    2</span>&#160;<span class="comment"> * Copyright (c) 2016, 2017 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno">    3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno">    4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno">    5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno">    6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno">    7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno">    8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno">    9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno">   10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno">   11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno">   12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno">   13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno">   14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno">   15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno">   16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno">   17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno">   18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno">   19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno">   20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno">   21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno">   22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno">   23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno">   24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_functions_8h.xhtml">arm_compute/runtime/NEON/NEFunctions.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno">   25</span>&#160;</div><div class="line"><a name="l00026"></a><span class="lineno">   26</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_types_8h.xhtml">arm_compute/core/Types.h</a>&quot;</span></div><div class="line"><a name="l00027"></a><span class="lineno">   27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_allocator_8h.xhtml">arm_compute/runtime/Allocator.h</a>&quot;</span></div><div class="line"><a name="l00028"></a><span class="lineno">   28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_blob_lifetime_manager_8h.xhtml">arm_compute/runtime/BlobLifetimeManager.h</a>&quot;</span></div><div class="line"><a name="l00029"></a><span class="lineno">   29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_memory_manager_on_demand_8h.xhtml">arm_compute/runtime/MemoryManagerOnDemand.h</a>&quot;</span></div><div class="line"><a name="l00030"></a><span class="lineno">   30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_pool_manager_8h.xhtml">arm_compute/runtime/PoolManager.h</a>&quot;</span></div><div class="line"><a name="l00031"></a><span class="lineno">   31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="utils_2_utils_8h.xhtml">utils/Utils.h</a>&quot;</span></div><div class="line"><a name="l00032"></a><span class="lineno">   32</span>&#160;</div><div class="line"><a name="l00033"></a><span class="lineno">   33</span>&#160;<span class="keyword">using namespace </span><a class="code" href="namespacearm__compute.xhtml">arm_compute</a>;</div><div class="line"><a name="l00034"></a><span class="lineno">   34</span>&#160;<span class="keyword">using namespace </span>utils;</div><div class="line"><a name="l00035"></a><span class="lineno">   35</span>&#160;</div><div class="line"><a name="l00036"></a><span class="lineno"><a class="line" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">   36</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">main_cnn</a>(<span class="keywordtype">int</span> argc, <span class="keyword">const</span> <span class="keywordtype">char</span> **argv)</div><div class="line"><a name="l00037"></a><span class="lineno">   37</span>&#160;{</div><div class="line"><a 
name="l00038"></a><span class="lineno">   38</span>&#160;    <a class="code" href="core_2_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a>(argc);</div><div class="line"><a name="l00039"></a><span class="lineno">   39</span>&#160;    <a class="code" href="core_2_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a>(argv);</div><div class="line"><a name="l00040"></a><span class="lineno">   40</span>&#160;</div><div class="line"><a name="l00041"></a><span class="lineno">   41</span>&#160;    <span class="comment">// Create NEON allocator</span></div><div class="line"><a name="l00042"></a><span class="lineno">   42</span>&#160;    <a class="code" href="classarm__compute_1_1_allocator.xhtml">Allocator</a> allocator;</div><div class="line"><a name="l00043"></a><span class="lineno">   43</span>&#160;</div><div class="line"><a name="l00044"></a><span class="lineno">   44</span>&#160;    <span class="comment">// Create memory manager components</span></div><div class="line"><a name="l00045"></a><span class="lineno">   45</span>&#160;    <span class="comment">// We need 2 memory managers: 1 for handling the tensors within the functions (mm_layers) and 1 for handling the input and output tensors of the functions (mm_transitions))</span></div><div class="line"><a name="l00046"></a><span class="lineno">   46</span>&#160;    <span class="keyword">auto</span> lifetime_mgr0  = std::make_shared&lt;BlobLifetimeManager&gt;();                           <span class="comment">// Create lifetime manager</span></div><div class="line"><a name="l00047"></a><span class="lineno">   47</span>&#160;    <span class="keyword">auto</span> lifetime_mgr1  = std::make_shared&lt;BlobLifetimeManager&gt;();                           <span class="comment">// Create lifetime manager</span></div><div class="line"><a name="l00048"></a><span class="lineno">   48</span>&#160;    <span class="keyword">auto</span> pool_mgr0      = std::make_shared&lt;PoolManager&gt;();                                   <span class="comment">// Create pool manager</span></div><div class="line"><a name="l00049"></a><span class="lineno">   49</span>&#160;    <span class="keyword">auto</span> pool_mgr1      = std::make_shared&lt;PoolManager&gt;();                                   <span class="comment">// Create pool manager</span></div><div class="line"><a name="l00050"></a><span class="lineno">   50</span>&#160;    <span class="keyword">auto</span> mm_layers      = std::make_shared&lt;MemoryManagerOnDemand&gt;(lifetime_mgr0, pool_mgr0); <span class="comment">// Create the memory manager</span></div><div class="line"><a name="l00051"></a><span class="lineno">   51</span>&#160;    <span class="keyword">auto</span> mm_transitions = std::make_shared&lt;MemoryManagerOnDemand&gt;(lifetime_mgr1, pool_mgr1); <span class="comment">// Create the memory manager</span></div><div class="line"><a name="l00052"></a><span class="lineno">   52</span>&#160;</div><div class="line"><a name="l00053"></a><span class="lineno">   53</span>&#160;    <span class="comment">// The src tensor should contain the input image</span></div><div class="line"><a name="l00054"></a><span class="lineno">   54</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> src;</div><div class="line"><a name="l00055"></a><span class="lineno">   55</span>&#160;</div><div class="line"><a name="l00056"></a><span class="lineno">   56</span>&#160;    <span class="comment">// The weights and biases tensors should be initialized with the 
values inferred with the training</span></div><div class="line"><a name="l00057"></a><span class="lineno">   57</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights0;</div><div class="line"><a name="l00058"></a><span class="lineno">   58</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights1;</div><div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> weights2;</div><div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases0;</div><div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases1;</div><div class="line"><a name="l00062"></a><span class="lineno">   62</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> biases2;</div><div class="line"><a name="l00063"></a><span class="lineno">   63</span>&#160;</div><div class="line"><a name="l00064"></a><span class="lineno">   64</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_conv0;</div><div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_conv1;</div><div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act0;</div><div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act1;</div><div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_act2;</div><div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_pool0;</div><div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_pool1;</div><div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_fc0;</div><div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> out_softmax;</div><div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160;    <span class="comment">// Create layers and set memory manager where allowed to manage internal memory requirements</span></div><div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml">NEConvolutionLayer</a>    conv0(mm_layers);</div><div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml">NEConvolutionLayer</a>    conv1(mm_layers);</div><div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml">NEPoolingLayer</a>        pool0;</div><div class="line"><a 
name="l00078"></a><span class="lineno">   78</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml">NEPoolingLayer</a>        pool1;</div><div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> fc0(mm_layers);</div><div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act0;</div><div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act1;</div><div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml">NEActivationLayer</a>     act2;</div><div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160;    <a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml">NESoftmaxLayer</a>        softmax(mm_layers);</div><div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;</div><div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;    <span class="comment">/* [Initialize tensors] */</span></div><div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160;</div><div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160;    <span class="comment">// Initialize src tensor</span></div><div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> width_src_image  = 32;</div><div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> height_src_image = 32;</div><div class="line"><a name="l00090"></a><span class="lineno">   90</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ifm_src_img      = 1;</div><div class="line"><a name="l00091"></a><span class="lineno">   91</span>&#160;</div><div class="line"><a name="l00092"></a><span class="lineno">   92</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> src_shape(width_src_image, height_src_image, ifm_src_img);</div><div class="line"><a name="l00093"></a><span class="lineno">   93</span>&#160;    src.allocator()-&gt;init(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(src_shape, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00094"></a><span class="lineno">   94</span>&#160;</div><div class="line"><a name="l00095"></a><span class="lineno">   95</span>&#160;    <span class="comment">// Initialize tensors of conv0</span></div><div class="line"><a name="l00096"></a><span class="lineno">   96</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_x_conv0 = 5;</div><div class="line"><a name="l00097"></a><span class="lineno">   97</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_y_conv0 = 5;</div><div class="line"><a name="l00098"></a><span 
class="lineno">   98</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ofm_conv0      = 8;</div><div class="line"><a name="l00099"></a><span class="lineno">   99</span>&#160;</div><div class="line"><a name="l00100"></a><span class="lineno">  100</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_conv0(kernel_x_conv0, kernel_y_conv0, src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), ofm_conv0);</div><div class="line"><a name="l00101"></a><span class="lineno">  101</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_conv0(weights_shape_conv0[3]);</div><div class="line"><a name="l00102"></a><span class="lineno">  102</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_conv0(src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>(), src_shape.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>(), weights_shape_conv0[3]);</div><div class="line"><a name="l00103"></a><span class="lineno">  103</span>&#160;</div><div class="line"><a name="l00104"></a><span class="lineno">  104</span>&#160;    weights0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00105"></a><span class="lineno">  105</span>&#160;    biases0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00106"></a><span class="lineno">  106</span>&#160;    out_conv0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00107"></a><span class="lineno">  107</span>&#160;</div><div class="line"><a name="l00108"></a><span class="lineno">  108</span>&#160;    <span class="comment">// Initialize tensor of act0</span></div><div class="line"><a name="l00109"></a><span class="lineno">  109</span>&#160;    out_act0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" 
href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00110"></a><span class="lineno">  110</span>&#160;</div><div class="line"><a name="l00111"></a><span class="lineno">  111</span>&#160;    <span class="comment">// Initialize tensor of pool0</span></div><div class="line"><a name="l00112"></a><span class="lineno">  112</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_pool0 = out_shape_conv0;</div><div class="line"><a name="l00113"></a><span class="lineno">  113</span>&#160;    out_shape_pool0.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(0, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() / 2);</div><div class="line"><a name="l00114"></a><span class="lineno">  114</span>&#160;    out_shape_pool0.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(1, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() / 2);</div><div class="line"><a name="l00115"></a><span class="lineno">  115</span>&#160;    out_pool0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_pool0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00116"></a><span class="lineno">  116</span>&#160;</div><div class="line"><a name="l00117"></a><span class="lineno">  117</span>&#160;    <span class="comment">// Initialize tensors of conv1</span></div><div class="line"><a name="l00118"></a><span class="lineno">  118</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_x_conv1 = 3;</div><div class="line"><a name="l00119"></a><span class="lineno">  119</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> kernel_y_conv1 = 3;</div><div class="line"><a name="l00120"></a><span class="lineno">  120</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> ofm_conv1      = 16;</div><div class="line"><a name="l00121"></a><span class="lineno">  121</span>&#160;</div><div class="line"><a name="l00122"></a><span class="lineno">  122</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_conv1(kernel_x_conv1, kernel_y_conv1, out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), ofm_conv1);</div><div class="line"><a name="l00123"></a><span class="lineno">  123</span>&#160;</div><div class="line"><a name="l00124"></a><span class="lineno">  124</span>&#160;    <span class="keyword">const</span> <a class="code" 
href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_conv1(weights_shape_conv1[3]);</div><div class="line"><a name="l00125"></a><span class="lineno">  125</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_conv1(out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>(), out_shape_pool0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>(), weights_shape_conv1[3]);</div><div class="line"><a name="l00126"></a><span class="lineno">  126</span>&#160;</div><div class="line"><a name="l00127"></a><span class="lineno">  127</span>&#160;    weights1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00128"></a><span class="lineno">  128</span>&#160;    biases1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00129"></a><span class="lineno">  129</span>&#160;    out_conv1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00130"></a><span class="lineno">  130</span>&#160;</div><div class="line"><a name="l00131"></a><span class="lineno">  131</span>&#160;    <span class="comment">// Initialize tensor of act1</span></div><div class="line"><a name="l00132"></a><span class="lineno">  132</span>&#160;    out_act1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_conv1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00133"></a><span class="lineno">  133</span>&#160;</div><div class="line"><a name="l00134"></a><span class="lineno">  134</span>&#160;    <span class="comment">// Initialize tensor of pool1</span></div><div class="line"><a name="l00135"></a><span class="lineno">  135</span>&#160;    <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_pool1 = 
out_shape_conv1;</div><div class="line"><a name="l00136"></a><span class="lineno">  136</span>&#160;    out_shape_pool1.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(0, out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() / 2);</div><div class="line"><a name="l00137"></a><span class="lineno">  137</span>&#160;    out_shape_pool1.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">set</a>(1, out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() / 2);</div><div class="line"><a name="l00138"></a><span class="lineno">  138</span>&#160;    out_pool1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_pool1, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00139"></a><span class="lineno">  139</span>&#160;</div><div class="line"><a name="l00140"></a><span class="lineno">  140</span>&#160;    <span class="comment">// Initialize tensor of fc0</span></div><div class="line"><a name="l00141"></a><span class="lineno">  141</span>&#160;    constexpr <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> num_labels = 128;</div><div class="line"><a name="l00142"></a><span class="lineno">  142</span>&#160;</div><div class="line"><a name="l00143"></a><span class="lineno">  143</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> weights_shape_fc0(out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>() * out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a691c9cb93365c2e33f3429de43244098">y</a>() * out_shape_pool1.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">z</a>(), num_labels);</div><div class="line"><a name="l00144"></a><span class="lineno">  144</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> biases_shape_fc0(num_labels);</div><div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_fc0(num_labels);</div><div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;</div><div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160;    weights2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(weights_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;    biases2.<a 
class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(biases_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;    out_fc0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160;</div><div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;    <span class="comment">// Initialize tensor of act2</span></div><div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;    out_act2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_fc0, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160;</div><div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    <span class="comment">// Initialize tensor of softmax</span></div><div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> out_shape_softmax(out_shape_fc0.<a class="code" href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">x</a>());</div><div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160;    out_softmax.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(out_shape_softmax, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>));</div><div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;</div><div class="line"><a name="l00158"></a><span class="lineno">  158</span>&#160;    <span class="comment">/* -----------------------End: [Initialize tensors] */</span></div><div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160;</div><div class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;    <span class="comment">/* [Configure functions] */</span></div><div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;</div><div class="line"><a name="l00162"></a><span 
class="lineno">  162</span>&#160;    <span class="comment">// in:32x32x1: 5x5 convolution, 8 output features maps (OFM)</span></div><div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160;    conv0.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#aee13eaa771696a8257ededf5bf921cbb">configure</a>(&amp;src, &amp;weights0, &amp;biases0, &amp;out_conv0, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(1 <span class="comment">/* stride_x */</span>, 1 <span class="comment">/* stride_y */</span>, 2 <span class="comment">/* pad_x */</span>, 2 <span class="comment">/* pad_y */</span>));</div><div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;</div><div class="line"><a name="l00165"></a><span class="lineno">  165</span>&#160;    <span class="comment">// in:32x32x8, out:32x32x8, Activation function: relu</span></div><div class="line"><a name="l00166"></a><span class="lineno">  166</span>&#160;    act0.<a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_conv0, &amp;out_act0, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div><div class="line"><a name="l00167"></a><span class="lineno">  167</span>&#160;</div><div class="line"><a name="l00168"></a><span class="lineno">  168</span>&#160;    <span class="comment">// in:32x32x8, out:16x16x8 (2x2 pooling), Pool type function: Max</span></div><div class="line"><a name="l00169"></a><span class="lineno">  169</span>&#160;    pool0.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#a6fa6e4b65796fd0bf43da9b4d617d568">configure</a>(&amp;out_act0, &amp;out_pool0, <a class="code" href="classarm__compute_1_1_pooling_layer_info.xhtml">PoolingLayerInfo</a>(<a class="code" href="namespacearm__compute.xhtml#adf2ced65e536375a1c96425d9fced858a26a4b44a837bf97b972628509912b4a5">PoolingType::MAX</a>, 2, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(2 <span class="comment">/* stride_x */</span>, 2 <span class="comment">/* stride_y */</span>)));</div><div class="line"><a name="l00170"></a><span class="lineno">  170</span>&#160;</div><div class="line"><a name="l00171"></a><span class="lineno">  171</span>&#160;    <span class="comment">// in:16x16x8: 3x3 convolution, 16 output features maps (OFM)</span></div><div class="line"><a name="l00172"></a><span class="lineno">  172</span>&#160;    conv1.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#aee13eaa771696a8257ededf5bf921cbb">configure</a>(&amp;out_pool0, &amp;weights1, &amp;biases1, &amp;out_conv1, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(1 <span class="comment">/* stride_x */</span>, 1 <span class="comment">/* stride_y */</span>, 1 <span class="comment">/* pad_x */</span>, 1 <span class="comment">/* pad_y */</span>));</div><div class="line"><a name="l00173"></a><span class="lineno">  173</span>&#160;</div><div class="line"><a name="l00174"></a><span class="lineno">  174</span>&#160;    <span class="comment">// in:16x16x16, out:16x16x16, Activation function: relu</span></div><div class="line"><a name="l00175"></a><span class="lineno">  175</span>&#160;    act1.<a class="code" 
href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_conv1, &amp;out_act1, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div><div class="line"><a name="l00176"></a><span class="lineno">  176</span>&#160;</div><div class="line"><a name="l00177"></a><span class="lineno">  177</span>&#160;    <span class="comment">// in:16x16x16, out:8x8x16 (2x2 pooling), Pool type function: Average</span></div><div class="line"><a name="l00178"></a><span class="lineno">  178</span>&#160;    pool1.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#a6fa6e4b65796fd0bf43da9b4d617d568">configure</a>(&amp;out_act1, &amp;out_pool1, <a class="code" href="classarm__compute_1_1_pooling_layer_info.xhtml">PoolingLayerInfo</a>(<a class="code" href="namespacearm__compute.xhtml#a9172da722f0a434e5cc07c0a3c115d93afcefd647d6a866603c627b11347c707a">PoolingType::AVG</a>, 2, <a class="code" href="classarm__compute_1_1_pad_stride_info.xhtml">PadStrideInfo</a>(2 <span class="comment">/* stride_x */</span>, 2 <span class="comment">/* stride_y */</span>)));</div><div class="line"><a name="l00179"></a><span class="lineno">  179</span>&#160;</div><div class="line"><a name="l00180"></a><span class="lineno">  180</span>&#160;    <span class="comment">// in:8x8x16, out:128</span></div><div class="line"><a name="l00181"></a><span class="lineno">  181</span>&#160;    fc0.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ae184041d029cd0dded821875db8a0929">configure</a>(&amp;out_pool1, &amp;weights2, &amp;biases2, &amp;out_fc0);</div><div class="line"><a name="l00182"></a><span class="lineno">  182</span>&#160;</div><div class="line"><a name="l00183"></a><span class="lineno">  183</span>&#160;    <span class="comment">// in:128, out:128, Activation function: relu</span></div><div class="line"><a name="l00184"></a><span class="lineno">  184</span>&#160;    act2.<a class="code" href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;out_fc0, &amp;out_act2, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">ActivationLayerInfo::ActivationFunction::RELU</a>));</div><div class="line"><a name="l00185"></a><span class="lineno">  185</span>&#160;</div><div class="line"><a name="l00186"></a><span class="lineno">  186</span>&#160;    <span class="comment">// in:128, out:128</span></div><div class="line"><a name="l00187"></a><span class="lineno">  187</span>&#160;    softmax.<a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml#a062268dfb7b8a63b7331d1cafcb7a081">configure</a>(&amp;out_act2, &amp;out_softmax);</div><div class="line"><a name="l00188"></a><span class="lineno">  188</span>&#160;</div><div class="line"><a name="l00189"></a><span class="lineno">  189</span>&#160;    <span class="comment">/* -----------------------End: [Configure functions] */</span></div><div class="line"><a name="l00190"></a><span class="lineno">  190</span>&#160;</div><div class="line"><a name="l00191"></a><span class="lineno">  191</span>&#160;    <span class="comment">/*[ Add 
tensors to memory manager ]*/</span></div><div class="line"><a name="l00192"></a><span class="lineno">  192</span>&#160;</div><div class="line"><a name="l00193"></a><span class="lineno">  193</span>&#160;    <span class="comment">// We need 2 memory groups for handling the input and output</span></div><div class="line"><a name="l00194"></a><span class="lineno">  194</span>&#160;    <span class="comment">// We call explicitly allocate after manage() in order to avoid overlapping lifetimes</span></div><div class="line"><a name="l00195"></a><span class="lineno">  195</span>&#160;    <a class="code" href="classarm__compute_1_1_memory_group_base.xhtml">MemoryGroup</a> memory_group0(mm_transitions);</div><div class="line"><a name="l00196"></a><span class="lineno">  196</span>&#160;    <a class="code" href="classarm__compute_1_1_memory_group_base.xhtml">MemoryGroup</a> memory_group1(mm_transitions);</div><div class="line"><a name="l00197"></a><span class="lineno">  197</span>&#160;</div><div class="line"><a name="l00198"></a><span class="lineno">  198</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_conv0);</div><div class="line"><a name="l00199"></a><span class="lineno">  199</span>&#160;    out_conv0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00200"></a><span class="lineno">  200</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act0);</div><div class="line"><a name="l00201"></a><span class="lineno">  201</span>&#160;    out_act0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00202"></a><span class="lineno">  202</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_pool0);</div><div class="line"><a name="l00203"></a><span class="lineno">  203</span>&#160;    out_pool0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00204"></a><span class="lineno">  204</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_conv1);</div><div class="line"><a name="l00205"></a><span class="lineno">  205</span>&#160;    out_conv1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00206"></a><span class="lineno">  206</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act1);</div><div class="line"><a name="l00207"></a><span class="lineno">  207</span>&#160;    out_act1.<a 
class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00208"></a><span class="lineno">  208</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_pool1);</div><div class="line"><a name="l00209"></a><span class="lineno">  209</span>&#160;    out_pool1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00210"></a><span class="lineno">  210</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_fc0);</div><div class="line"><a name="l00211"></a><span class="lineno">  211</span>&#160;    out_fc0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00212"></a><span class="lineno">  212</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_act2);</div><div class="line"><a name="l00213"></a><span class="lineno">  213</span>&#160;    out_act2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00214"></a><span class="lineno">  214</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">manage</a>(&amp;out_softmax);</div><div class="line"><a name="l00215"></a><span class="lineno">  215</span>&#160;    out_softmax.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00216"></a><span class="lineno">  216</span>&#160;</div><div class="line"><a name="l00217"></a><span class="lineno">  217</span>&#160;    <span class="comment">/* -----------------------End: [ Add tensors to memory manager ] */</span></div><div class="line"><a name="l00218"></a><span class="lineno">  218</span>&#160;</div><div class="line"><a name="l00219"></a><span class="lineno">  219</span>&#160;    <span class="comment">/* [Allocate tensors] */</span></div><div class="line"><a name="l00220"></a><span class="lineno">  220</span>&#160;</div><div class="line"><a name="l00221"></a><span class="lineno">  221</span>&#160;    <span class="comment">// Now that the padding requirements are known we can allocate all tensors</span></div><div class="line"><a name="l00222"></a><span class="lineno">  222</span>&#160;    src.allocator()-&gt;allocate();</div><div class="line"><a name="l00223"></a><span class="lineno">  223</span>&#160;    weights0.<a class="code" 
href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00224"></a><span class="lineno">  224</span>&#160;    weights1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00225"></a><span class="lineno">  225</span>&#160;    weights2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00226"></a><span class="lineno">  226</span>&#160;    biases0.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00227"></a><span class="lineno">  227</span>&#160;    biases1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00228"></a><span class="lineno">  228</span>&#160;    biases2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00229"></a><span class="lineno">  229</span>&#160;</div><div class="line"><a name="l00230"></a><span class="lineno">  230</span>&#160;    <span class="comment">/* -----------------------End: [Allocate tensors] */</span></div><div class="line"><a name="l00231"></a><span class="lineno">  231</span>&#160;</div><div class="line"><a name="l00232"></a><span class="lineno">  232</span>&#160;    <span class="comment">// Finalize layers memory manager</span></div><div class="line"><a name="l00233"></a><span class="lineno">  233</span>&#160;</div><div class="line"><a name="l00234"></a><span class="lineno">  234</span>&#160;    <span class="comment">// Set allocator that the memory manager will use</span></div><div class="line"><a name="l00235"></a><span class="lineno">  235</span>&#160;    mm_layers-&gt;set_allocator(&amp;allocator);</div><div class="line"><a name="l00236"></a><span class="lineno">  236</span>&#160;</div><div class="line"><a name="l00237"></a><span class="lineno">  237</span>&#160;    <span class="comment">// Number of pools that the manager will create. This specifies how many layers you want to run in parallel</span></div><div class="line"><a name="l00238"></a><span class="lineno">  238</span>&#160;    mm_layers-&gt;set_num_pools(1);</div><div class="line"><a name="l00239"></a><span class="lineno">  239</span>&#160;</div><div class="line"><a name="l00240"></a><span class="lineno">  240</span>&#160;    <span class="comment">// Finalize the manager. 
(Validity checks, memory allocations etc)</span></div><div class="line"><a name="l00241"></a><span class="lineno">  241</span>&#160;    mm_layers-&gt;finalize();</div><div class="line"><a name="l00242"></a><span class="lineno">  242</span>&#160;</div><div class="line"><a name="l00243"></a><span class="lineno">  243</span>&#160;    <span class="comment">// Finalize transitions memory manager</span></div><div class="line"><a name="l00244"></a><span class="lineno">  244</span>&#160;</div><div class="line"><a name="l00245"></a><span class="lineno">  245</span>&#160;    <span class="comment">// Set allocator that the memory manager will use</span></div><div class="line"><a name="l00246"></a><span class="lineno">  246</span>&#160;    mm_transitions-&gt;set_allocator(&amp;allocator);</div><div class="line"><a name="l00247"></a><span class="lineno">  247</span>&#160;</div><div class="line"><a name="l00248"></a><span class="lineno">  248</span>&#160;    <span class="comment">// Number of pools that the manager will create. This specifies how many models we can run in parallel.</span></div><div class="line"><a name="l00249"></a><span class="lineno">  249</span>&#160;    <span class="comment">// Setting to 2 as we need one for the input and one for the output at any given time</span></div><div class="line"><a name="l00250"></a><span class="lineno">  250</span>&#160;    mm_transitions-&gt;set_num_pools(2);</div><div class="line"><a name="l00251"></a><span class="lineno">  251</span>&#160;</div><div class="line"><a name="l00252"></a><span class="lineno">  252</span>&#160;    <span class="comment">// Finalize the manager. (Validity checks, memory allocations etc)</span></div><div class="line"><a name="l00253"></a><span class="lineno">  253</span>&#160;    mm_transitions-&gt;finalize();</div><div class="line"><a name="l00254"></a><span class="lineno">  254</span>&#160;</div><div class="line"><a name="l00255"></a><span class="lineno">  255</span>&#160;    <span class="comment">/* [Initialize weights and biases tensors] */</span></div><div class="line"><a name="l00256"></a><span class="lineno">  256</span>&#160;</div><div class="line"><a name="l00257"></a><span class="lineno">  257</span>&#160;    <span class="comment">// Once the tensors have been allocated, the src, weights and biases tensors can be initialized</span></div><div class="line"><a name="l00258"></a><span class="lineno">  258</span>&#160;    <span class="comment">// ...</span></div><div class="line"><a name="l00259"></a><span class="lineno">  259</span>&#160;</div><div class="line"><a name="l00260"></a><span class="lineno">  260</span>&#160;    <span class="comment">/* -----------------------[Initialize weights and biases tensors] */</span></div><div class="line"><a name="l00261"></a><span class="lineno">  261</span>&#160;</div><div class="line"><a name="l00262"></a><span class="lineno">  262</span>&#160;    <span class="comment">/* [Execute the functions] */</span></div><div class="line"><a name="l00263"></a><span class="lineno">  263</span>&#160;</div><div class="line"><a name="l00264"></a><span class="lineno">  264</span>&#160;    <span class="comment">// Acquire memory for the memory groups</span></div><div class="line"><a name="l00265"></a><span class="lineno">  265</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">acquire</a>();</div><div class="line"><a name="l00266"></a><span class="lineno">  266</span>&#160;    memory_group1.<a class="code" 
href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">acquire</a>();</div><div class="line"><a name="l00267"></a><span class="lineno">  267</span>&#160;</div><div class="line"><a name="l00268"></a><span class="lineno">  268</span>&#160;    conv0.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00269"></a><span class="lineno">  269</span>&#160;    act0.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div><div class="line"><a name="l00270"></a><span class="lineno">  270</span>&#160;    pool0.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00271"></a><span class="lineno">  271</span>&#160;    conv1.<a class="code" href="classarm__compute_1_1_n_e_convolution_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00272"></a><span class="lineno">  272</span>&#160;    act1.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div><div class="line"><a name="l00273"></a><span class="lineno">  273</span>&#160;    pool1.<a class="code" href="classarm__compute_1_1_n_e_pooling_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00274"></a><span class="lineno">  274</span>&#160;    fc0.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00275"></a><span class="lineno">  275</span>&#160;    act2.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#ab5fd6e96c07aaaed2747c7e16ed5951e">run</a>();</div><div class="line"><a name="l00276"></a><span class="lineno">  276</span>&#160;    softmax.<a class="code" href="classarm__compute_1_1_n_e_softmax_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00277"></a><span class="lineno">  277</span>&#160;</div><div class="line"><a name="l00278"></a><span class="lineno">  278</span>&#160;    <span class="comment">// Release memory</span></div><div class="line"><a name="l00279"></a><span class="lineno">  279</span>&#160;    memory_group0.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">release</a>();</div><div class="line"><a name="l00280"></a><span class="lineno">  280</span>&#160;    memory_group1.<a class="code" href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">release</a>();</div><div class="line"><a name="l00281"></a><span class="lineno">  281</span>&#160;</div><div class="line"><a name="l00282"></a><span class="lineno">  282</span>&#160;    <span class="comment">/* -----------------------End: [Execute the functions] */</span></div><div class="line"><a name="l00283"></a><span class="lineno">  283</span>&#160;}</div><div class="line"><a name="l00284"></a><span class="lineno">  284</span>&#160;</div><div class="line"><a name="l00294"></a><span class="lineno"><a class="line" href="neon__cnn_8cpp.xhtml#a217dbf8b442f20279ea00b898af96f52">  294</a></span>&#160;<span class="keywordtype">int</span> <a class="code" href="neon__cnn_8cpp.xhtml#a217dbf8b442f20279ea00b898af96f52">main</a>(<span class="keywordtype">int</span> argc, <span class="keyword">const</span> <span 
class="keywordtype">char</span> **argv)</div><div class="line"><a name="l00295"></a><span class="lineno">  295</span>&#160;{</div><div class="line"><a name="l00296"></a><span class="lineno">  296</span>&#160;    <span class="keywordflow">return</span> <a class="code" href="namespacearm__compute_1_1utils.xhtml#a4c9395db2c8b8d0c336656a7b58fca3e">utils::run_example</a>(argc, argv, <a class="code" href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">main_cnn</a>);</div><div class="line"><a name="l00297"></a><span class="lineno">  297</span>&#160;}</div><div class="ttc" id="classarm__compute_1_1_dimensions_xhtml_a336121cb63ed79fa0a072eed03d694ac"><div class="ttname"><a href="classarm__compute_1_1_dimensions.xhtml#a336121cb63ed79fa0a072eed03d694ac">arm_compute::Dimensions::z</a></div><div class="ttdeci">T z() const </div><div class="ttdoc">Alias to access the size of the third dimension. </div><div class="ttdef"><b>Definition:</b> <a href="_dimensions_8h_source.xhtml#l00091">Dimensions.h:91</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml">arm_compute::TensorShape</a></div><div class="ttdoc">Shape of a tensor. </div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00038">TensorShape.h:38</a></div></div>
-<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_a8d16047fa6fdcf5f5453056cc0e1daba"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">arm_compute::MemoryGroupBase::acquire</a></div><div class="ttdeci">void acquire() override</div><div class="ttdoc">Acquires backing memory for the whole group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00124">MemoryGroupBase.h:124</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_a8d16047fa6fdcf5f5453056cc0e1daba"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#a8d16047fa6fdcf5f5453056cc0e1daba">arm_compute::MemoryGroupBase::acquire</a></div><div class="ttdeci">void acquire() override</div><div class="ttdoc">Acquires backing memory for the whole group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00125">MemoryGroupBase.h:125</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml_a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaad346bb4679d29be241279f15d7795c1c">arm_compute::ActivationLayerInfo::ActivationFunction::RELU</a></div><div class="ttdoc">Rectifier (  ) </div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_convolution_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_convolution_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEConvolutionLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function. </div></div>
 <div class="ttc" id="utils_2_utils_8h_xhtml"><div class="ttname"><a href="utils_2_utils_8h.xhtml">Utils.h</a></div></div>
-<div class="ttc" id="_error_8h_xhtml_a4103adbb45806b2f2002d44b91d0d206"><div class="ttname"><a href="_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a></div><div class="ttdeci">#define ARM_COMPUTE_UNUSED(var)</div><div class="ttdoc">To avoid unused variables warnings. </div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00049">Error.h:49</a></div></div>
-<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">arm_compute::Format::F32</a></div><div class="ttdoc">1 channel, 1 F16 per channel </div></div>
+<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">arm_compute::Format::F32</a></div><div class="ttdoc">1 channel, 1 F32 per channel </div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_fully_connected_layer_xhtml_ae184041d029cd0dded821875db8a0929"><div class="ttname"><a href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ae184041d029cd0dded821875db8a0929">arm_compute::NEFullyConnectedLayer::configure</a></div><div class="ttdeci">void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights=true, bool are_weights_reshaped=false)</div><div class="ttdoc">Set the input and output tensors. </div></div>
-<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml">arm_compute::ActivationLayerInfo</a></div><div class="ttdoc">Activation Layer Information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00511">Types.h:511</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml">arm_compute::ActivationLayerInfo</a></div><div class="ttdoc">Activation Layer Information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00650">Types.h:650</a></div></div>
+<div class="ttc" id="core_2_error_8h_xhtml_a4103adbb45806b2f2002d44b91d0d206"><div class="ttname"><a href="core_2_error_8h.xhtml#a4103adbb45806b2f2002d44b91d0d206">ARM_COMPUTE_UNUSED</a></div><div class="ttdeci">#define ARM_COMPUTE_UNUSED(var)</div><div class="ttdoc">To avoid unused variables warnings. </div><div class="ttdef"><b>Definition:</b> <a href="core_2_error_8h_source.xhtml#l00147">Error.h:147</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_softmax_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_softmax_layer.xhtml">arm_compute::NESoftmaxLayer</a></div><div class="ttdoc">Basic function to compute a SoftmaxLayer. </div><div class="ttdef"><b>Definition:</b> <a href="_n_e_softmax_layer_8h_source.xhtml#l00047">NESoftmaxLayer.h:47</a></div></div>
 <div class="ttc" id="_n_e_functions_8h_xhtml"><div class="ttname"><a href="_n_e_functions_8h.xhtml">NEFunctions.h</a></div></div>
+<div class="ttc" id="namespacearm__compute_xhtml"><div class="ttname"><a href="namespacearm__compute.xhtml">arm_compute</a></div><div class="ttdoc">This file contains all available output stages for GEMMLowp on OpenCL. </div><div class="ttdef"><b>Definition:</b> <a href="01__library_8dox_source.xhtml#l00001">01_library.dox:1</a></div></div>
 <div class="ttc" id="namespacearm__compute_1_1utils_xhtml_a4c9395db2c8b8d0c336656a7b58fca3e"><div class="ttname"><a href="namespacearm__compute_1_1utils.xhtml#a4c9395db2c8b8d0c336656a7b58fca3e">arm_compute::utils::run_example</a></div><div class="ttdeci">int run_example(int argc, const char **argv, example &amp;func)</div><div class="ttdoc">Run an example and handle the potential exceptions it throws. </div><div class="ttdef"><b>Definition:</b> <a href="utils_2_utils_8cpp_source.xhtml#l00069">Utils.cpp:69</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_convolution_layer_xhtml_aee13eaa771696a8257ededf5bf921cbb"><div class="ttname"><a href="classarm__compute_1_1_n_e_convolution_layer.xhtml#aee13eaa771696a8257ededf5bf921cbb">arm_compute::NEConvolutionLayer::configure</a></div><div class="ttdeci">void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &amp;conv_info, const WeightsInfo &amp;weights_info=WeightsInfo())</div><div class="ttdoc">Set the input and output tensors. </div></div>
-<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_a531ec877bfc923dea3ab6f1be5e6e1ac"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">arm_compute::Tensor::allocator</a></div><div class="ttdeci">TensorAllocator * allocator()</div><div class="ttdoc">Return a pointer to the tensor&#39;s allocator. </div></div>
+<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_a531ec877bfc923dea3ab6f1be5e6e1ac"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#a531ec877bfc923dea3ab6f1be5e6e1ac">arm_compute::Tensor::allocator</a></div><div class="ttdeci">TensorAllocator * allocator()</div><div class="ttdoc">Return a pointer to the tensor&amp;#39;s allocator. </div></div>
 <div class="ttc" id="classarm__compute_1_1_dimensions_xhtml_afb5cd37bb08f1029691590372e6330f0"><div class="ttname"><a href="classarm__compute_1_1_dimensions.xhtml#afb5cd37bb08f1029691590372e6330f0">arm_compute::Dimensions::x</a></div><div class="ttdeci">T x() const </div><div class="ttdoc">Alias to access the size of the first dimension. </div><div class="ttdef"><b>Definition:</b> <a href="_dimensions_8h_source.xhtml#l00081">Dimensions.h:81</a></div></div>
 <div class="ttc" id="neon__cnn_8cpp_xhtml_a217dbf8b442f20279ea00b898af96f52"><div class="ttname"><a href="neon__cnn_8cpp.xhtml#a217dbf8b442f20279ea00b898af96f52">main</a></div><div class="ttdeci">int main(int argc, const char **argv)</div><div class="ttdoc">Main program for cnn test. </div><div class="ttdef"><b>Definition:</b> <a href="neon__cnn_8cpp_source.xhtml#l00294">neon_cnn.cpp:294</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_pooling_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_pooling_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEPoolingLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function. </div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_convolution_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_convolution_layer.xhtml">arm_compute::NEConvolutionLayer</a></div><div class="ttdoc">Basic function to simulate a convolution layer. </div><div class="ttdef"><b>Definition:</b> <a href="_n_e_convolution_layer_8h_source.xhtml#l00084">NEConvolutionLayer.h:84</a></div></div>
-<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_ac1f67376afb7822f262a0174ef4a3104"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">arm_compute::MemoryGroupBase::manage</a></div><div class="ttdeci">void manage(TensorType *obj)</div><div class="ttdoc">Sets a object to be managed by the given memory group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00096">MemoryGroupBase.h:96</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_ac1f67376afb7822f262a0174ef4a3104"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#ac1f67376afb7822f262a0174ef4a3104">arm_compute::MemoryGroupBase::manage</a></div><div class="ttdeci">void manage(TensorType *obj)</div><div class="ttdoc">Sets a object to be managed by the given memory group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00097">MemoryGroupBase.h:97</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_tensor_allocator_xhtml_a6e509c2a177b0b29e9e2369535094dee"><div class="ttname"><a href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">arm_compute::TensorAllocator::allocate</a></div><div class="ttdeci">void allocate() override</div><div class="ttdoc">Allocate size specified by TensorInfo of CPU memory. </div></div>
 <div class="ttc" id="classarm__compute_1_1_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml">arm_compute::Tensor</a></div><div class="ttdoc">Basic implementation of the tensor interface. </div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8h_source.xhtml#l00037">Tensor.h:37</a></div></div>
-<div class="ttc" id="classarm__compute_1_1_pad_stride_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_pad_stride_info.xhtml">arm_compute::PadStrideInfo</a></div><div class="ttdoc">Padding and stride information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00406">Types.h:406</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_pad_stride_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_pad_stride_info.xhtml">arm_compute::PadStrideInfo</a></div><div class="ttdoc">Padding and stride information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00460">Types.h:460</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml_a0cb0e1f5da2e1cc2e0ea5690450f53e8"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml#a0cb0e1f5da2e1cc2e0ea5690450f53e8">arm_compute::TensorShape::set</a></div><div class="ttdeci">void set(size_t dimension, size_t value)</div><div class="ttdoc">Accessor to set the value of one of the dimensions. </div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00074">TensorShape.h:74</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_activation_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_activation_layer.xhtml">arm_compute::NEActivationLayer</a></div><div class="ttdoc">Basic function to run NEActivationLayerKernel. </div><div class="ttdef"><b>Definition:</b> <a href="_n_e_activation_layer_8h_source.xhtml#l00039">NEActivationLayer.h:39</a></div></div>
 <div class="ttc" id="_blob_lifetime_manager_8h_xhtml"><div class="ttname"><a href="_blob_lifetime_manager_8h.xhtml">BlobLifetimeManager.h</a></div></div>
@@ -441,25 +156,25 @@
 <div class="ttc" id="classarm__compute_1_1_n_e_activation_layer_xhtml_adfb5ef37594fc9371c4a2b95e3d5e31b"><div class="ttname"><a href="classarm__compute_1_1_n_e_activation_layer.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">arm_compute::NEActivationLayer::configure</a></div><div class="ttdeci">void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info)</div><div class="ttdoc">Set the input and output tensor. </div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_pooling_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_pooling_layer.xhtml">arm_compute::NEPoolingLayer</a></div><div class="ttdoc">Basic function to simulate a pooling layer with the specified pooling operation. </div><div class="ttdef"><b>Definition:</b> <a href="_n_e_pooling_layer_8h_source.xhtml#l00042">NEPoolingLayer.h:42</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_n_e_softmax_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_softmax_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NESoftmaxLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function. </div></div>
-<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_ad7ed3842c3160ae4b9f51acbf4564438"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">arm_compute::MemoryGroupBase::release</a></div><div class="ttdeci">void release() override</div><div class="ttdoc">Releases backing memory of the whole group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00135">MemoryGroupBase.h:135</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_memory_group_base_xhtml_ad7ed3842c3160ae4b9f51acbf4564438"><div class="ttname"><a href="classarm__compute_1_1_memory_group_base.xhtml#ad7ed3842c3160ae4b9f51acbf4564438">arm_compute::MemoryGroupBase::release</a></div><div class="ttdeci">void release() override</div><div class="ttdoc">Releases backing memory of the whole group. </div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_base_8h_source.xhtml#l00136">MemoryGroupBase.h:136</a></div></div>
 <div class="ttc" id="neon__cnn_8cpp_xhtml_a7616847a3120a787be556c0bb30f43b4"><div class="ttname"><a href="neon__cnn_8cpp.xhtml#a7616847a3120a787be556c0bb30f43b4">main_cnn</a></div><div class="ttdeci">void main_cnn(int argc, const char **argv)</div><div class="ttdef"><b>Definition:</b> <a href="neon__cnn_8cpp_source.xhtml#l00036">neon_cnn.cpp:36</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_allocator_xhtml"><div class="ttname"><a href="classarm__compute_1_1_allocator.xhtml">arm_compute::Allocator</a></div><div class="ttdoc">Default malloc allocator implementation. </div><div class="ttdef"><b>Definition:</b> <a href="_allocator_8h_source.xhtml#l00034">Allocator.h:34</a></div></div>
 <div class="ttc" id="classarm__compute_1_1_tensor_allocator_xhtml_a3014ce2f4215e8a44331aa5daf3ba0d4"><div class="ttname"><a href="classarm__compute_1_1_tensor_allocator.xhtml#a3014ce2f4215e8a44331aa5daf3ba0d4">arm_compute::TensorAllocator::init</a></div><div class="ttdeci">void init(const TensorAllocator &amp;allocator, const Coordinates &amp;coords, TensorInfo sub_info)</div><div class="ttdoc">Shares the same backing memory with another tensor allocator, while the tensor info might be differen...</div></div>
-<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml">arm_compute::TensorInfo</a></div><div class="ttdoc">Store the tensor&#39;s metadata. </div><div class="ttdef"><b>Definition:</b> <a href="_tensor_info_8h_source.xhtml#l00042">TensorInfo.h:42</a></div></div>
-<div class="ttc" id="classarm__compute_1_1_n_e_softmax_layer_xhtml_a9daf8026e68559806afe7d0aa12693d6"><div class="ttname"><a href="classarm__compute_1_1_n_e_softmax_layer.xhtml#a9daf8026e68559806afe7d0aa12693d6">arm_compute::NESoftmaxLayer::configure</a></div><div class="ttdeci">void configure(ITensor *input, ITensor *output)</div><div class="ttdoc">Set the input and output tensors. </div></div>
+<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml">arm_compute::TensorInfo</a></div><div class="ttdoc">Store the tensor&amp;#39;s metadata. </div><div class="ttdef"><b>Definition:</b> <a href="_tensor_info_8h_source.xhtml#l00044">TensorInfo.h:44</a></div></div>
 <div class="ttc" id="namespacearm__compute_xhtml_adf2ced65e536375a1c96425d9fced858a26a4b44a837bf97b972628509912b4a5"><div class="ttname"><a href="namespacearm__compute.xhtml#adf2ced65e536375a1c96425d9fced858a26a4b44a837bf97b972628509912b4a5">arm_compute::NonLinearFilterFunction::MAX</a></div><div class="ttdoc">Non linear dilate. </div></div>
 <div class="ttc" id="arm__compute_2core_2_types_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_types_8h.xhtml">Types.h</a></div></div>
-<div class="ttc" id="classarm__compute_1_1_pooling_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_pooling_layer_info.xhtml">arm_compute::PoolingLayerInfo</a></div><div class="ttdoc">Pooling Layer Information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00445">Types.h:445</a></div></div>
+<div class="ttc" id="classarm__compute_1_1_n_e_softmax_layer_xhtml_a062268dfb7b8a63b7331d1cafcb7a081"><div class="ttname"><a href="classarm__compute_1_1_n_e_softmax_layer.xhtml#a062268dfb7b8a63b7331d1cafcb7a081">arm_compute::NESoftmaxLayer::configure</a></div><div class="ttdeci">void configure(ITensor *input, ITensor *output, float beta=1.0f)</div><div class="ttdoc">Set the input and output tensors. </div></div>
+<div class="ttc" id="classarm__compute_1_1_pooling_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_pooling_layer_info.xhtml">arm_compute::PoolingLayerInfo</a></div><div class="ttdoc">Pooling Layer Information class. </div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00553">Types.h:553</a></div></div>
 <div class="ttc" id="_allocator_8h_xhtml"><div class="ttname"><a href="_allocator_8h.xhtml">Allocator.h</a></div></div>
 </div><!-- fragment --></div><!-- contents -->
 </div><!-- doc-content -->
 <!-- start footer part -->
 <div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
   <ul>
-    <li class="navelem"><a class="el" href="dir_1253bad92dedae5edd993ead924afb7b.xhtml">examples</a></li><li class="navelem"><a class="el" href="neon__cnn_8cpp.xhtml">neon_cnn.cpp</a></li>
-    <li class="footer">Generated on Thu Oct 12 2017 14:26:33 for Compute Library by
+    <li class="navelem"><a class="el" href="dir_d28a4824dc47e487b107a5db32ef43c4.xhtml">examples</a></li><li class="navelem"><a class="el" href="neon__cnn_8cpp.xhtml">neon_cnn.cpp</a></li>
+    <li class="footer">Generated on Thu Dec 14 2017 23:48:31 for Compute Library by
     <a href="http://www.doxygen.org/index.html">
-    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.6 </li>
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.11 </li>
   </ul>
 </div>
 </body>