resample: add NEON-optimized code if supported on target

- Add a NEON-optimized inner_product_single for fixed point.
The semantics of inner_product_single have also changed: it now
performs the final right shift and saturation itself. This change
affects fixed-point calculations only.

- Force the full sinc table (i.e. no polynomial interpolation)

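For reference, the new fixed-point contract is roughly the following
plain-C sketch (saturate32_pshr is a hypothetical stand-in for the
SATURATE32PSHR macro added in fixed_generic.h):

    #include <stdint.h>

    /* Clamp to +/-a after a rounding right shift by 'shift'; this is
       what SATURATE32PSHR(x, shift, a) expands to for 32-bit words. */
    static inline int16_t saturate32_pshr(int32_t x, int shift, int16_t a)
    {
        if (x >= ((int32_t)a << shift))
            return a;
        if (x <= -((int32_t)a << shift))
            return -a;
        return (int16_t)((x + (1 << (shift - 1))) >> shift);
    }

Callers accordingly change from

    out[out_stride * out_sample++] = PSHR32(sum, 15);

to storing the already shifted and saturated sum directly:

    out[out_stride * out_sample++] = sum;
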
Change-Id: Iba6a6f54fad26598de623851fc455c947681ca30
diff --git a/Android.mk b/Android.mk
index 333ef03..50985dc 100644
--- a/Android.mk
+++ b/Android.mk
@@ -48,7 +48,12 @@
 LOCAL_MODULE:= libspeexresampler
 LOCAL_MODULE_TAGS := optional
 
-LOCAL_CFLAGS+= -DEXPORT= -DFIXED_POINT -O3 -fstrict-aliasing -fprefetch-loop-arrays
+LOCAL_CFLAGS += -DEXPORT= -DFIXED_POINT -DRESAMPLE_FORCE_FULL_SINC_TABLE
+LOCAL_CFLAGS += -O3 -fstrict-aliasing -fprefetch-loop-arrays
+
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+	LOCAL_CFLAGS += -D_USE_NEON
+endif
 
 LOCAL_C_INCLUDES += \
 	$(LOCAL_PATH)/include
diff --git a/NOTICE b/NOTICE
index 22798c6..4c4658d 100644
--- a/NOTICE
+++ b/NOTICE
@@ -10,6 +10,7 @@
 Copyright (C) 2008 Thorvald Natvig
 Copyright (c) 2003-2004, Mark Borgerding
 Copyright (c) 2005-2007, Jean-Marc Valin
+Copyright (C) 2011 Jyri Sarha, Texas Instruments
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
diff --git a/libspeex/arch.h b/libspeex/arch.h
index d38c36c..e911422 100644
--- a/libspeex/arch.h
+++ b/libspeex/arch.h
@@ -171,6 +171,7 @@
 #define VSHR32(a,shift) (a)
 #define SATURATE16(x,a) (x)
 #define SATURATE32(x,a) (x)
+#define SATURATE32PSHR(x,shift,a) (x)
 
 #define PSHR(a,shift)       (a)
 #define SHR(a,shift)       (a)
diff --git a/libspeex/fixed_generic.h b/libspeex/fixed_generic.h
index 3fb096e..0e012e9 100644
--- a/libspeex/fixed_generic.h
+++ b/libspeex/fixed_generic.h
@@ -52,6 +52,12 @@
 #define SATURATE16(x,a) (((x)>(a) ? (a) : (x)<-(a) ? -(a) : (x)))
 #define SATURATE32(x,a) (((x)>(a) ? (a) : (x)<-(a) ? -(a) : (x)))
 
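+/* Saturating rounding shift: clamp x to the range +/-(a << shift)
+   before the rounding right shift, so the result stays within +/-a. */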
+#define SATURATE32PSHR(x,shift,a) (((x)>=(SHL32(a,shift))) ? (a) : \
+                                   (x)<=-(SHL32(a,shift)) ? -(a) : \
+                                   (PSHR32(x, shift)))
+
 #define SHR(a,shift) ((a) >> (shift))
 #define SHL(a,shift) ((spx_word32_t)(a) << (shift))
 #define PSHR(a,shift) (SHR((a)+((EXTEND32(1)<<((shift))>>1)),shift))
diff --git a/libspeex/resample.c b/libspeex/resample.c
index bebd1a8..fa72d85 100644
--- a/libspeex/resample.c
+++ b/libspeex/resample.c
@@ -99,6 +99,10 @@
 #include "resample_sse.h"
 #endif
 
+#ifdef _USE_NEON
+#include "resample_neon.h"
+#endif
+
 /* Number of elements to allocate on the stack */
 #ifdef VAR_ARRAYS
 #define FIXED_STACK_ALLOC 8192
@@ -354,11 +358,12 @@
         accum[3] += sinc[j+3]*iptr[j+3];
       }
       sum = accum[0] + accum[1] + accum[2] + accum[3];
+      sum = SATURATE32PSHR(sum, 15, 32767);
 #else
       sum = inner_product_single(sinc, iptr, N);
 #endif
 
-      out[out_stride * out_sample++] = PSHR32(sum, 15);
+      out[out_stride * out_sample++] = sum;
       last_sample += int_advance;
       samp_frac_num += frac_advance;
       if (samp_frac_num >= den_rate)
@@ -464,12 +469,13 @@
 
       cubic_coef(frac, interp);
       sum = MULT16_32_Q15(interp[0],accum[0]) + MULT16_32_Q15(interp[1],accum[1]) + MULT16_32_Q15(interp[2],accum[2]) + MULT16_32_Q15(interp[3],accum[3]);
+      sum = SATURATE32PSHR(sum, 15, 32767);
 #else
       cubic_coef(frac, interp);
       sum = interpolate_product_single(iptr, st->sinc_table + st->oversample + 4 - offset - 2, N, st->oversample, interp);
 #endif
       
-      out[out_stride * out_sample++] = PSHR32(sum,15);
+      out[out_stride * out_sample++] = sum;
       last_sample += int_advance;
       samp_frac_num += frac_advance;
       if (samp_frac_num >= den_rate)
@@ -579,7 +585,13 @@
    }
    
    /* Choose the resampling type that requires the least amount of memory */
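+   /* RESAMPLE_FORCE_FULL_SINC_TABLE unconditionally selects the full
+      sinc table and never falls back to cubic interpolation. */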
+#ifdef RESAMPLE_FORCE_FULL_SINC_TABLE
+   if (1)
+#else
    if (st->den_rate <= st->oversample)
+#endif
    {
       spx_uint32_t i;
       if (!st->sinc_table)
diff --git a/libspeex/resample_neon.h b/libspeex/resample_neon.h
new file mode 100644
index 0000000..65741c1
--- /dev/null
+++ b/libspeex/resample_neon.h
@@ -0,0 +1,100 @@
+/* Copyright (C) 2007-2008 Jean-Marc Valin
+ * Copyright (C) 2008 Thorvald Natvig
+ * Copyright (C) 2011 Jyri Sarha, Texas Instruments
+ */
+/**
+   @file resample_neon.h
+   @brief Resampler functions (NEON version)
+*/
+/*
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   - Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   - Neither the name of the Xiph.org Foundation nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
+   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <arm_neon.h>
+
+/* NOTE: This code only works with the following filter lengths:
+         8, or a multiple of 16 (n*16, where n = 1, 2, 3, ...).
+         The current resampler quality presets all satisfy this rule.
+*/
+
+#ifdef FIXED_POINT
+#define OVERRIDE_INNER_PRODUCT_SINGLE
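+/* Products are accumulated in the four 32-bit lanes of q0; the final
+   reduction relies on q0 aliasing {d0, d1} to fold those lanes into a
+   single rounded and saturated 16-bit result. */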
+static inline int32_t inner_product_single(const int16_t *a, const int16_t *b, unsigned int len)
+{
+    int32_t ret;
+    if (len > 8) {
+        asm volatile ("      vld1.16 {d16, d17, d18, d19}, [%[a]]!\n" /* load 16 samples from a */
+                      "      vld1.16 {d20, d21, d22, d23}, [%[b]]!\n" /* load 16 samples from b */
+                      "      subs %[len], %[len], #16\n"
+                      "      vmull.s16 q0, d16, d20\n"   /* q0 = a[0..3]*b[0..3], widened to 32 bits */
+                      "      vmlal.s16 q0, d17, d21\n"   /* accumulate a[4..7]*b[4..7] */
+                      "      vmlal.s16 q0, d18, d22\n"   /* accumulate a[8..11]*b[8..11] */
+                      "      vmlal.s16 q0, d19, d23\n"   /* accumulate a[12..15]*b[12..15] */
+                      "      beq 2f\n"                   /* exactly 16 samples: skip the loop */
+                      "1:"                               /* main loop, 16 samples per iteration */
+                      "      vld1.16 {d16, d17, d18, d19}, [%[a]]!\n"
+                      "      vld1.16 {d20, d21, d22, d23}, [%[b]]!\n"
+                      "      subs %[len], %[len], #16\n"
+                      "      vmlal.s16 q0, d16, d20\n"
+                      "      vmlal.s16 q0, d17, d21\n"
+                      "      vmlal.s16 q0, d18, d22\n"
+                      "      vmlal.s16 q0, d19, d23\n"
+                      "      bne 1b\n"
+                      "2:"                               /* q0 aliases {d0, d1}: fold the lanes */
+                      "      vaddl.s32 q0, d0, d1\n"     /* two 64-bit partial sums */
+                      "      vadd.s64 d0, d0, d1\n"      /* d0 = full 64-bit sum */
+                      "      vqmovn.s64 d0, q0\n"        /* d0[0] = sum saturated to 32 bits */
+                      "      vqrshrn.s32 d0, q0, #15\n"  /* Q15 rounding shift, saturate to 16 bits */
+                      "      vmov.s16 %[ret],d0[0]\n"    /* extract the 16-bit result */
+                      : [ret] "=&r" (ret), [a] "+r" (a), [b] "+r" (b),
+                        [len] "+r" (len)
+                      :
+                      : "cc", "q0",
+                        "d16", "d17", "d18", "d19",
+                        "d20", "d21", "d22", "d23");
+    }
+    else {
+        asm volatile ("vld1.16 {d4, d5}, [%[a]]\n"
+                      "vld1.16 {d6, d7}, [%[b]]\n"
+                      "vmull.s16 q0, d4, d6\n"
+                      "vmlal.s16 q0, d5, d7\n"
+                      "vaddl.s32 q0, d0, d1\n"
+                      "vadd.s64 d0, d0, d1\n"
+                      "vqmovn.s64 d0, q0\n"
+                      "vqrshrn.s32 d0, q0, #15\n"
+                      "vmov.s16 %[ret],d0[0]\n"
+                      : [ret] "=&r" (ret)
+                      : [a] "r" (a), [b] "r" (b)
+                      : "q0", "d4", "d5", "d6", "d7");
+    }
+    return ret;
+}
+#endif