arm_compute v17.09

Change-Id: I4bf8f4e6e5f84ce0d5b6f5ba570d276879f42a81
diff --git a/documentation/tests.xhtml b/documentation/tests.xhtml
index e3e55c8..3d2b9c6 100644
--- a/documentation/tests.xhtml
+++ b/documentation/tests.xhtml
@@ -4,7 +4,7 @@
 <head>
 <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
 <meta http-equiv="X-UA-Compatible" content="IE=9"/>
-<meta name="generator" content="Doxygen 1.8.11"/>
+<meta name="generator" content="Doxygen 1.8.6"/>
 <meta name="robots" content="NOINDEX, NOFOLLOW" /> <!-- Prevent indexing by search engines -->
 <title>Compute Library: Validation and benchmarks tests</title>
 <link href="tabs.css" rel="stylesheet" type="text/css"/>
@@ -12,24 +12,22 @@
 <script type="text/javascript" src="dynsections.js"></script>
 <link href="navtree.css" rel="stylesheet" type="text/css"/>
 <script type="text/javascript" src="resize.js"></script>
-<script type="text/javascript" src="navtreedata.js"></script>
 <script type="text/javascript" src="navtree.js"></script>
 <script type="text/javascript">
   $(document).ready(initResizable);
   $(window).load(resizeHeight);
 </script>
 <link href="search/search.css" rel="stylesheet" type="text/css"/>
-<script type="text/javascript" src="search/searchdata.js"></script>
 <script type="text/javascript" src="search/search.js"></script>
 <script type="text/javascript">
-  $(document).ready(function() { init_search(); });
+  $(document).ready(function() { searchBox.OnSelectItem(0); });
 </script>
 <script type="text/x-mathjax-config">
   MathJax.Hub.Config({
     extensions: ["tex2jax.js"],
     jax: ["input/TeX","output/HTML-CSS"],
 });
-</script><script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
+</script><script src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
 <link href="doxygen.css" rel="stylesheet" type="text/css" />
 </head>
 <body>
@@ -40,7 +38,7 @@
  <tr style="height: 56px;">
   <td style="padding-left: 0.5em;">
    <div id="projectname">Compute Library
-   &#160;<span id="projectnumber">v17.06</span>
+   &#160;<span id="projectnumber">17.09</span>
    </div>
   </td>
  </tr>
@@ -48,7 +46,7 @@
 </table>
 </div>
 <!-- end header part -->
-<!-- Generated by Doxygen 1.8.11 -->
+<!-- Generated by Doxygen 1.8.6 -->
 <script type="text/javascript">
 var searchBox = new SearchBox("searchBox", "search",false,'Search');
 </script>
@@ -97,7 +95,7 @@
      onmouseover="return searchBox.OnSearchSelectShow()"
      onmouseout="return searchBox.OnSearchSelectHide()"
      onkeydown="return searchBox.OnSearchSelectKey(event)">
-</div>
+<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Data Structures</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Namespaces</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark">&#160;</span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark">&#160;</span>Typedefs</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(7)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(8)"><span class="SelectionMark">&#160;</span>Enumerator</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(9)"><span class="SelectionMark">&#160;</span>Friends</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(10)"><span class="SelectionMark">&#160;</span>Macros</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(11)"><span class="SelectionMark">&#160;</span>Pages</a></div>
 
 <!-- iframe showing the search results (closed by default) -->
 <div id="MSearchResultsWindow">
@@ -112,78 +110,263 @@
 </div><!--header-->
 <div class="contents">
 <div class="toc"><h3>Table of Contents</h3>
-<ul><li class="level1"><a href="#building_test_dependencies">Building dependencies</a><ul><li class="level2"><a href="#building_boost">Building Boost</a></li>
-<li class="level2"><a href="#building_google_benchmark">Building Google Benchmark</a></li>
+<ul><li class="level1"><a href="#tests_overview">Overview</a><ul><li class="level2"><a href="#tests_overview_fixtures">Fixtures</a><ul><li class="level3"><a href="#tests_overview_fixtures_fixture">Fixture</a></li>
+<li class="level3"><a href="#tests_overview_fixtures_data_fixture">Data fixture</a></li>
 </ul>
 </li>
+<li class="level2"><a href="#tests_overview_test_cases">Test cases</a><ul><li class="level3"><a href="#tests_overview_test_cases_test_case">Test case</a></li>
+<li class="level3"><a href="#tests_overview_test_cases_fixture_fixture_test_case">Fixture test case</a></li>
+<li class="level3"><a href="#tests_overview_test_cases_fixture_register_fixture_test_case">Registering a fixture as test case</a></li>
+<li class="level3"><a href="#tests_overview_test_cases_data_test_case">Data test case</a></li>
+<li class="level3"><a href="#tests_overview_test_cases_fixture_data_test_case">Fixture data test case</a></li>
+<li class="level3"><a href="#tests_overview_test_cases_register_fixture_data_test_case">Registering a fixture as data test case</a></li>
+</ul>
+</li>
+</ul>
+</li>
+<li class="level1"><a href="#writing_tests">Writing validation tests</a></li>
 <li class="level1"><a href="#tests_running_tests">Running tests</a><ul><li class="level2"><a href="#tests_running_tests_benchmarking">Benchmarking</a><ul><li class="level3"><a href="#tests_running_tests_benchmarking_filter">Filter tests</a></li>
 <li class="level3"><a href="#tests_running_tests_benchmarking_runtime">Runtime</a></li>
-<li class="level3"><a href="#tests_running_tests_benchmarking_verbosity">Verbosity</a></li>
+<li class="level3"><a href="#tests_running_tests_benchmarking_output">Output</a></li>
 </ul>
 </li>
-<li class="level2"><a href="#tests_running_tests_validation">Validation</a><ul><li class="level3"><a href="#tests_running_tests_validation_filter">Filter tests</a></li>
-<li class="level3"><a href="#tests_running_tests_validation_verbosity">Verbosity</a></li>
-</ul>
-</li>
+<li class="level2"><a href="#tests_running_tests_validation">Validation</a></li>
 </ul>
 </li>
 </ul>
 </div>
-<div class="textblock"><h1><a class="anchor" id="building_test_dependencies"></a>
-Building dependencies</h1>
-<p>The tests currently make use of Boost (Test and Program options) for validation and Google Benchmark for performance runs. Below are instructions about how to build these 3rd party libraries.</p>
-<dl class="section note"><dt>Note</dt><dd>By default the build of the validation and benchmark tests is disabled, to enable it use <code>validation_tests=1</code> and <code>benchmark_tests=1</code></dd></dl>
-<h2><a class="anchor" id="building_boost"></a>
-Building Boost</h2>
-<p>First follow the instructions from the Boost library on how to setup the Boost build system (<a href="http://www.boost.org/doc/libs/1_64_0/more/getting_started/index.html">http://www.boost.org/doc/libs/1_64_0/more/getting_started/index.html</a>). Afterwards the required libraries can be build with: </p><pre class="fragment">./b2 --with-program_options --with-test link=static \
-define=BOOST_TEST_ALTERNATIVE_INIT_API
-</pre><p>Additionally, depending on your environment, it might be necessary to specify the <code>toolset=</code> option to choose the right compiler. Moreover, <code>address-model=32</code> can be used to force building for 32bit and <code>target-os=android</code> must be specified to build for Android.</p>
-<p>After executing the build command the libraries <code>libboost_program_options.a</code> and <code>libboost_unit_test_framework.a</code> can be found in <code>./stage/lib</code>.</p>
-<h2><a class="anchor" id="building_google_benchmark"></a>
-Building Google Benchmark</h2>
-<p>Instructions on how to build Google Benchmark using CMake can be found in their repository: <a href="https://github.com/google/benchmark">https://github.com/google/benchmark</a>. For example, building for Android 32bit can be achieved via </p><pre class="fragment">cmake -DCMAKE_BUILD_TYPE=Release \
--DCMAKE_CXX_COMPILER=arm-linux-androideabi-clang++ \
--DBENCHMARK_ENABLE_LTO=false -DBENCHMARK_ENABLE_TESTING=false ..
-</pre><p>The library required by the compute library is <code>libbenchmark.a</code>.</p>
+<div class="textblock"><h1><a class="anchor" id="tests_overview"></a>
+Overview</h1>
+<p>Benchmark and validation tests are based on the same framework to set up and run the tests. In addition to running simple, self-contained test functions, the framework supports fixtures and data test cases. The former make it possible to share common setup routines between various backends, thus reducing the amount of duplicated code. The latter can be used to parameterize tests or fixtures with different inputs, e.g. different tensor shapes. One limitation is that tests/fixtures cannot be parameterized based on the data type if static type information is needed within the test (e.g. to validate the results).</p>
+<h2><a class="anchor" id="tests_overview_fixtures"></a>
+Fixtures</h2>
+<p>Fixtures can be used to share common setup, teardown or even run tasks among multiple test cases. For that purpose a fixture can define a <code>setup</code>, <code>teardown</code> and <code>run</code> method. Additionally the constructor and destructor can also be customized.</p>
+<p>An instance of the fixture is created immediately before the actual test is executed. After construction the <a class="el" href="classarm__compute_1_1test_1_1framework_1_1_fixture.xhtml#a4fc01d736fe50cf5b977f755b675f11d">framework::Fixture::setup</a> method is called. Then the test function or the fixture's <code>run</code> method is invoked. After test execution the <a class="el" href="classarm__compute_1_1test_1_1framework_1_1_fixture.xhtml#a4adab6322a0276f34a7d656d49fc865c">framework::Fixture::teardown</a> method is called and finally the fixture is destroyed.</p>
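+<p>Conceptually, the framework therefore runs each fixture-based test roughly as follows (a simplified sketch, not the framework's actual implementation):</p>
+<pre class="fragment">CustomFixture fixture;  // constructor
+fixture.setup();        // framework::Fixture::setup
+fixture.run();          // or the body of the test case function
+fixture.teardown();     // framework::Fixture::teardown
+                        // destructor runs when the fixture goes out of scope
+</pre>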
+<h3><a class="anchor" id="tests_overview_fixtures_fixture"></a>
+Fixture</h3>
+<p>Fixtures for non-parameterized tests are straightforward. The custom fixture class has to inherit from <a class="el" href="classarm__compute_1_1test_1_1framework_1_1_fixture.xhtml">framework::Fixture</a> and can implement any of the <code>setup</code>, <code>teardown</code> or <code>run</code> methods. None of the methods takes any arguments or returns anything. </p>
+<pre class="fragment">class CustomFixture : public framework::Fixture
+{
+    void setup()
+    {
+        _ptr = malloc(4000);
+    }
+
+    void run()
+    {
+        ARM_COMPUTE_ASSERT(_ptr != nullptr);
+    }
+
+    void teardown()
+    {
+        free(_ptr);
+    }
+
+    void *_ptr;
+};
+</pre><h3><a class="anchor" id="tests_overview_fixtures_data_fixture"></a>
+Data fixture</h3>
+<p>The advantage of a parameterized fixture is that arguments can be passed to the setup method at runtime. To make this possible the setup method has to be a template with a type parameter for every argument (though the template parameter doesn't have to be used). All other methods remain the same. </p>
+<pre class="fragment">class CustomFixture : public framework::Fixture
+{
+#ifdef ALTERNATIVE_DECLARATION
+    template &lt;typename ...&gt;
+    void setup(size_t size)
+    {
+        _ptr = malloc(size);
+    }
+#else
+    template &lt;typename T&gt;
+    void setup(T size)
+    {
+        _ptr = malloc(size);
+    }
+#endif
+
+    void run()
+    {
+        ARM_COMPUTE_ASSERT(_ptr != nullptr);
+    }
+
+    void teardown()
+    {
+        free(_ptr);
+    }
+
+    void *_ptr;
+};
+</pre><h2><a class="anchor" id="tests_overview_test_cases"></a>
+Test cases</h2>
+<p>All of the following macros can optionally be prefixed with <code>EXPECTED_FAILURE_</code> or <code>DISABLED_</code>.</p>
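+<p>For example, prefixing the <code>TEST_CASE</code> macro described below gives <code>DISABLED_TEST_CASE</code> or <code>EXPECTED_FAILURE_TEST_CASE</code> (a minimal illustration, assuming the same argument list as the unprefixed macro):</p>
+<pre class="fragment">DISABLED_TEST_CASE(TestCaseName, DatasetMode::PRECOMMIT)
+{
+    ARM_COMPUTE_ASSERT_EQUAL(1 + 1, 2);
+}
+</pre>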
+<h3><a class="anchor" id="tests_overview_test_cases_test_case"></a>
+Test case</h3>
+<p>A simple test case function taking no inputs and having no (shared) state.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the dataset mode in which the test will be active.</li>
+</ul>
+<pre class="fragment">TEST_CASE(TestCaseName, DatasetMode::PRECOMMIT)
+{
+    ARM_COMPUTE_ASSERT_EQUAL(1 + 1, 2);
+}
+</pre><h3><a class="anchor" id="tests_overview_test_cases_fixture_fixture_test_case"></a>
+Fixture test case</h3>
+<p>A simple test case function that takes no inputs and inherits from a fixture. The test case will have access to all public and protected members of the fixture. Only the setup and teardown methods of the fixture will be used. The body of this function will be used as the test function.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the class name of the fixture.</li>
+<li>Third argument is the dataset mode in which the test will be active.</li>
+</ul>
+<pre class="fragment">class FixtureName : public framework::Fixture
+{
+    public:
+        void setup() override
+        {
+            _one = 1;
+        }
+
+    protected:
+        int _one;
+};
+
+FIXTURE_TEST_CASE(TestCaseName, FixtureName, DatasetMode::PRECOMMIT)
+{
+    ARM_COMPUTE_ASSERT_EQUAL(_one + 1, 2);
+}
+</pre><h3><a class="anchor" id="tests_overview_test_cases_fixture_register_fixture_test_case"></a>
+Registering a fixture as test case</h3>
+<p>Allows a fixture to be used directly as a test case. Instead of defining a new test function, the <code>run</code> method of the fixture is executed.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the class name of the fixture.</li>
+<li>Third argument is the dataset mode in which the test will be active.</li>
+</ul>
+<pre class="fragment">class FixtureName : public framework::Fixture
+{
+    public:
+        void setup() override
+        {
+            _one = 1;
+        }
+
+        void run() override
+        {
+            ARM_COMPUTE_ASSERT_EQUAL(_one + 1, 2);
+        }
+
+    protected:
+        int _one;
+};
+
+REGISTER_FIXTURE_TEST_CASE(TestCaseName, FixtureName, DatasetMode::PRECOMMIT);
+</pre><h3><a class="anchor" id="tests_overview_test_cases_data_test_case"></a>
+Data test case</h3>
+<p>A parameterized test case function that has no (shared) state. The dataset will be used to generate versions of the test case with different inputs.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the dataset mode in which the test will be active.</li>
+<li>Third argument is the dataset.</li>
+<li>Further arguments specify names of the arguments to the test function. The number must match the arity of the dataset.</li>
+</ul>
+<pre class="fragment">DATA_TEST_CASE(TestCaseName, DatasetMode::PRECOMMIT, framework::make("Numbers", {1, 2, 3}), num)
+{
+    ARM_COMPUTE_ASSERT(num &lt; 4);
+}
+</pre><h3><a class="anchor" id="tests_overview_test_cases_fixture_data_test_case"></a>
+Fixture data test case</h3>
+<p>A parameterized test case that inherits from a fixture. The test case will have access to all public and protected members of the fixture. Only the setup and teardown methods of the fixture will be used. The setup method of the fixture needs to be a template and has to accept inputs from the dataset as arguments. The body of this function will be used as the test function. The dataset will be used to generate versions of the test case with different inputs.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the class name of the fixture.</li>
+<li>Third argument is the dataset mode in which the test will be active.</li>
+<li>Fourth argument is the dataset.</li>
+</ul>
+<pre class="fragment">class FixtureName : public framework::Fixture
+{
+    public:
+        template &lt;typename T&gt;
+        void setup(T num)
+        {
+            _num = num;
+        }
+
+    protected:
+        int _num;
+};
+
+FIXTURE_DATA_TEST_CASE(TestCaseName, FixtureName, DatasetMode::PRECOMMIT, framework::make("Numbers", {1, 2, 3}))
+{
+    ARM_COMPUTE_ASSERT(_num &lt; 4);
+}
+</pre><h3><a class="anchor" id="tests_overview_test_cases_register_fixture_data_test_case"></a>
+Registering a fixture as data test case</h3>
+<p>Allows a fixture to be used directly as a parameterized test case. Instead of defining a new test function, the <code>run</code> method of the fixture is executed. The setup method of the fixture needs to be a template and has to accept inputs from the dataset as arguments. The dataset will be used to generate versions of the test case with different inputs.</p>
+<ul>
+<li>First argument is the name of the test case (has to be unique within the enclosing test suite).</li>
+<li>Second argument is the class name of the fixture.</li>
+<li>Third argument is the dataset mode in which the test will be active.</li>
+<li>Fourth argument is the dataset.</li>
+</ul>
+<pre class="fragment">class FixtureName : public framework::Fixture
+{
+    public:
+        template &lt;typename T&gt;
+        void setup(T num)
+        {
+            _num = num;
+        }
+
+        void run() override
+        {
+            ARM_COMPUTE_ASSERT(_num &lt; 4);
+        }
+
+    protected:
+        int _num;
+};
+
+REGISTER_FIXTURE_DATA_TEST_CASE(TestCaseName, FixtureName, DatasetMode::PRECOMMIT, framework::make("Numbers", {1, 2, 3}));
+</pre><h1><a class="anchor" id="writing_tests"></a>
+Writing validation tests</h1>
+<p>Before writing a new test case, have a look at the existing ones. They should provide a good overview of how test cases are structured.</p>
+<ul>
+<li>The C++ reference needs to be added to <code>tests/validation/CPP/</code>. The reference function is typically a template parameterized by the underlying value type of the <code><a class="el" href="classarm__compute_1_1test_1_1_simple_tensor.xhtml" title="Simple tensor object that stores elements in a consecutive chunk of memory. ">SimpleTensor</a></code>. This makes it easy to specialise for different data types.</li>
+<li>If all backends have a common interface it makes sense to share the setup code. This can be done by adding a fixture in <code>tests/validation/fixtures/</code>. Inside the <code>setup</code> method of a fixture the tensors can be created and initialised and the function can be configured and run. The actual test will only have to validate the results. To be shared among multiple backends the fixture class is usually a template that accepts the specific types (data, tensor class, function class etc.) as parameters; see the sketch after this list.</li>
+<li>The actual test cases need to be added for each backend individually. Typically there will be multiple tests for different data types and for different execution modes, e.g. precommit and nightly.</li>
+</ul>
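+<p>A minimal sketch of how the reference and the fixture typically fit together (the names and signatures below are illustrative only and do not refer to existing library code):</p>
+<pre class="fragment">// tests/validation/CPP/MyOperator.h: reference implementation, templated on the value type
+template &lt;typename T&gt;
+SimpleTensor&lt;T&gt; my_operator_reference(const SimpleTensor&lt;T&gt; &amp;src);
+
+// tests/validation/fixtures/MyOperatorFixture.h: setup code shared by all backends
+template &lt;typename TensorType, typename FunctionType, typename T&gt;
+class MyOperatorValidationFixture : public framework::Fixture
+{
+public:
+    template &lt;typename ...&gt;
+    void setup(TensorShape shape, DataType data_type)
+    {
+        // create and initialise the tensors, configure and run FunctionType,
+        // then compute the reference with my_operator_reference&lt;T&gt; and
+        // keep both results for the test case to validate
+    }
+};
+</pre>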
 <h1><a class="anchor" id="tests_running_tests"></a>
 Running tests</h1>
 <h2><a class="anchor" id="tests_running_tests_benchmarking"></a>
 Benchmarking</h2>
 <h3><a class="anchor" id="tests_running_tests_benchmarking_filter"></a>
 Filter tests</h3>
-<p>All tests can be run by invoking </p><pre class="fragment">./arm_compute_benchmark -- ./data
+<p>All tests can be run by invoking </p>
+<pre class="fragment">./arm_compute_benchmark ./data
 </pre><p>where <code>./data</code> contains the assets needed by the tests.</p>
-<p>If only a subset of the tests has to be executed the <code>--benchmark_filter</code> option takes a regular expression to select matching tests. </p><pre class="fragment">./arm_compute_benchmark --benchmark_filter=neon_bitwise_and ./data
-</pre><p>All available tests can be displayed with the <code>--benchmark_list_tests</code> switch. </p><pre class="fragment">./arm_compute_benchmark --benchmark_list_tests ./data
-</pre><h3><a class="anchor" id="tests_running_tests_benchmarking_runtime"></a>
+<p>If only a subset of the tests has to be executed the <code>--filter</code> option takes a regular expression to select matching tests. </p>
+<pre class="fragment">./arm_compute_benchmark --filter='NEON/.*AlexNet' ./data
+</pre><p>Additionally, each test has a test id which can also be used as a filter. However, the test id is not guaranteed to be stable when new tests are added; a test only keeps its id within a specific build. </p>
+<pre class="fragment">./arm_compute_benchmark --filter-id=10 ./data
+</pre><p>All available tests can be displayed with the <code>--list-tests</code> switch. </p>
+<pre class="fragment">./arm_compute_benchmark --list-tests
+</pre><p>More options can be found in the <code>--help</code> message.</p>
+<h3><a class="anchor" id="tests_running_tests_benchmarking_runtime"></a>
 Runtime</h3>
-<p>By default every test is run multiple <em>iterations</em> until a minimum time is reached. The minimum time (in seconds) can be controlled with the <code>--benchmark_min_time</code> flag. However, each test might have a hard coded value for the number of iterations or minimum execution time. In that case the command line argument is ignored for those specific tests. Additionally it is possible to specify multiple <em>repetitions</em> (<code>--benchmark_repetitions</code>) which will run each test multiple times (including the iterations). The average and standard deviation for all repetitions is automatically computed and reported.</p>
-<h3><a class="anchor" id="tests_running_tests_benchmarking_verbosity"></a>
-Verbosity</h3>
-<p>The verbosity of the test output can be controlled via the <code>--v</code> flag. Though it should hardly ever be necessary.</p>
+<p>By default every test is run once on a single thread. The number of iterations can be controlled via the <code>--iterations</code> option and the number of threads via <code>--threads</code>.</p>
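+<p>For example, to run each benchmark for 10 iterations on 4 threads (the values, and the exact option syntax shown here, are illustrative):</p>
+<pre class="fragment">./arm_compute_benchmark --iterations=10 --threads=4 ./data
+</pre>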
+<h3><a class="anchor" id="tests_running_tests_benchmarking_output"></a>
+Output</h3>
+<p>By default the benchmarking results are printed in a human-readable format on the command line. The colored output can be disabled via <code>--no-color-output</code>. As an alternative output format, JSON is supported and can be selected via <code>--log-format=json</code>. To write the output to a file instead of stdout, the <code>--log-file</code> option can be used.</p>
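+<p>For example, to write the results as JSON to a file (the file name is illustrative):</p>
+<pre class="fragment">./arm_compute_benchmark --log-format=json --log-file=benchmark.json ./data
+</pre>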
 <h2><a class="anchor" id="tests_running_tests_validation"></a>
 Validation</h2>
-<h3><a class="anchor" id="tests_running_tests_validation_filter"></a>
-Filter tests</h3>
-<p>All tests can be run by invoking </p><pre class="fragment">./arm_compute_validation -- ./data
-</pre><p>where <code>./data</code> contains the assets needed by the tests.</p>
-<p>As running all tests can take a lot of time the suite is split into "precommit" and "nightly" tests. The precommit tests will be fast to execute but still cover the most important features. In contrast the nightly tests offer more extensive coverage but take longer. The different subsets can be selected from the command line as follows: </p><pre class="fragment">./arm_compute_validation -t @precommit -- ./data
-./arm_compute_validation -t @nightly -- ./data
-</pre><p>Additionally it is possible to select specific suites or tests: </p><pre class="fragment">./arm_compute_validation -t CL -- ./data
-./arm_compute_validation -t NEON/BitwiseAnd/RunSmall/_0 -- ./data
-</pre><p>All available tests can be displayed with the <code>--list_content</code> switch. </p><pre class="fragment">./arm_compute_validation --list_content -- ./data
-</pre><p>For a complete list of possible selectors please see: <a href="http://www.boost.org/doc/libs/1_64_0/libs/test/doc/html/boost_test/runtime_config/test_unit_filtering.html">http://www.boost.org/doc/libs/1_64_0/libs/test/doc/html/boost_test/runtime_config/test_unit_filtering.html</a></p>
-<h3><a class="anchor" id="tests_running_tests_validation_verbosity"></a>
-Verbosity</h3>
-<p>There are two separate flags to control the verbosity of the test output. <code>--report_level</code> controls the verbosity of the summary produced after all tests have been executed. <code>--log_level</code> controls the verbosity of the information generated during the execution of tests. All available settings can be found in the Boost documentation for <a href="http://www.boost.org/doc/libs/1_64_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/report_level.html">&ndash;report_level</a> and <a href="http://www.boost.org/doc/libs/1_64_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/log_level.html">&ndash;log_level</a>, respectively. </p>
+<dl class="section note"><dt>Note</dt><dd>The new validation tests have the same interface as the benchmarking tests. </dd></dl>
 </div></div><!-- contents -->
 </div><!-- doc-content -->
 <!-- start footer part -->
 <div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
   <ul>
-    <li class="footer">Generated on Fri Jun 23 2017 15:44:34 for Compute Library by
+    <li class="footer">Generated on Thu Sep 28 2017 14:37:54 for Compute Library by
     <a href="http://www.doxygen.org/index.html">
-    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.11 </li>
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.6 </li>
   </ul>
 </div>
 </body>