arm_compute v18.08
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f9dcfcb..2feab89 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -24,7 +24,6 @@
 #include "GEMM.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
@@ -38,7 +37,7 @@
 SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
 
     // Compute reference
     const int M = a.shape().y();
@@ -85,79 +84,8 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
-{
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
-
-    // Compute reference
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    const int M = dst.shape().y();
-    const int N = dst.shape().x();
-    const int K = a.shape().x();
-    const int D = a.shape().z(); // Number of matrices in a batch
-    const int W = a.shape()[3];  // Number of batched-gemm (Winograd case)
-
-    const int a_stride_z = K * M;
-    const int a_stride_w = K * M * D;
-
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide the matrix B along the 3rd dimension in case matrix B has less than 3 dimensions
-    const int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
-
-    const int c_stride_z = N * M;
-    const int c_stride_w = N * M * D;
-
-    const int            fixed_point_position = a.fixed_point_position();
-    const fixed_point<T> alpha_q(alpha, fixed_point_position);
-    const fixed_point<T> beta_q(beta, fixed_point_position);
-
-    for(int w = 0; w < W; ++w)
-    {
-        for(int depth = 0; depth < D; ++depth)
-        {
-            const int base_addr_a = depth * a_stride_z + w * a_stride_w;
-            const int base_addr_b = depth * b_stride_z + w * b_stride_w;
-            const int base_addr_c = depth * c_stride_z + w * c_stride_w;
-
-            for(int row = 0; row < M; ++row)
-            {
-                for(int col = 0; col < N; ++col)
-                {
-                    fixed_point<promoted_type> acc_q(0, fixed_point_position);
-
-                    for(int k = 0; k < K; ++k)
-                    {
-                        const fixed_point<promoted_type> a0_q(a[base_addr_a + row * K + k], fixed_point_position, true);
-                        const fixed_point<promoted_type> b0_q(b[base_addr_b + k * N + col], fixed_point_position, true);
-
-                        acc_q = acc_q + (a0_q * b0_q);
-                    }
-
-                    // Finalize the result: alpha * A * B + beta * C
-                    const fixed_point<T> c0_q(c[base_addr_c + col + row * N], fixed_point_position, true);
-
-                    fixed_point<T> res_q(acc_q);
-                    res_q = alpha_q * res_q;
-                    res_q = res_q + (beta_q * c0_q);
-
-                    // Store the result
-                    dst[base_addr_c + col + row * N] = res_q.raw();
-                }
-            }
-        }
-    }
-
-    return dst;
-}
-
 template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
 template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<qint8_t> gemm(const SimpleTensor<qint8_t> &a, const SimpleTensor<qint8_t> &b, const SimpleTensor<qint8_t> &c, float alpha, float beta);
-template SimpleTensor<qint16_t> gemm(const SimpleTensor<qint16_t> &a, const SimpleTensor<qint16_t> &b, const SimpleTensor<qint16_t> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
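
For context, below is a minimal, self-contained sketch (not taken from the library) of what the floating-point reference path kept by this patch computes: dst = alpha * A * B + beta * C over row-major matrices. It uses plain std::vector and illustrative names (gemm_reference, M, N, K) instead of SimpleTensor, and it leaves out the batch/Winograd strides that the real reference also walks.

#include <cstddef>
#include <vector>

// Sketch of the floating-point GEMM reference: dst = alpha * A * B + beta * C.
// A is M x K, B is K x N, C and dst are M x N, all row-major.
std::vector<float> gemm_reference(const std::vector<float> &a,
                                  const std::vector<float> &b,
                                  const std::vector<float> &c,
                                  int M, int N, int K,
                                  float alpha, float beta)
{
    std::vector<float> dst(static_cast<std::size_t>(M) * N);

    for(int row = 0; row < M; ++row)
    {
        for(int col = 0; col < N; ++col)
        {
            float acc = 0.f;

            // Dot product of row `row` of A with column `col` of B
            for(int k = 0; k < K; ++k)
            {
                acc += a[row * K + k] * b[k * N + col];
            }

            // Finalize the result: alpha * A * B + beta * C
            dst[col + row * N] = alpha * acc + beta * c[col + row * N];
        }
    }

    return dst;
}

The deleted integral specialization computed the same expression in fixed point: judging from the removed code, each T value is treated as a raw integer scaled by the tensor's fixed_point_position, the dot product is accumulated in the promoted type promote_t<T>, and the final value is written back via res_q.raw().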