/************************************************************************
 * Copyright (C) 2010, The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following disclaimer
 *       in the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of the Android Open Source Project nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ************************************************************************

 function: arm7 and later wide math functions

 ************************************************************************/

#ifdef _ARM_ASSEM_

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

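/* MULT32: 32x32->64 signed multiply returning only the high word;
   equivalent to (ogg_int32_t)(((ogg_int64_t)x * y) >> 32). */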
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}

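/* MULT31: Q31 fixed-point multiply, i.e. (x*y)>>31; computed as the
   high word of the product shifted left one, so the low bit is lost. */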
static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

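/* MULT31_SHIFT15: (x*y)>>15 with round-to-nearest; the movs/adc pair
   folds bit 14 of the product back in through the carry flag. */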
static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs  %0, %0, lsr #15\n\t"
               "adc   %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}

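/* MB: compiler-only memory barrier (emits no instructions). Presumably
   here to keep the compiler from reordering the two result stores in the
   XPROD* helpers, whose output pointers may alias their inputs. */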
#define MB() asm volatile ("" : : : "memory")

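/* x = (a*t + b*v)>>32, y = (b*t - a*v)>>32 */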
static inline void XPROD32(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1;
  MB();
  *y = y1;
}

/* x = (a*t + b*v)>>31, y = (b*t - a*v)>>31 */
static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}

/* x = (a*t - b*v)>>31, y = (b*t + a*v)>>31 */
static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                            ogg_int32_t t, ogg_int32_t v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "rsb   %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

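/* CLIP_TO_15: saturate x to the signed 16-bit range [-32768,32767].
   The negative limit is materialized as 0x00008000, which reads as
   -32768 once the result is truncated to 16 bits. */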
static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs  %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds  %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}

#endif

#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM

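/* lsp_loop_asm: accumulate qi *= labs(ilsp[2j]-wi) and
   pi *= labs(ilsp[2j+1]-wi) over the filter, shifting both products
   down 16 bits (tracked in qexp) whenever a high word goes nonzero,
   handling the unpaired root of an odd-order filter, then scaling so
   at most 16 significant bits remain. */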
static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov r0,%3;"
      "mov r1,%5,asr#1;"
      "add r0,r0,r1,lsl#3;"
      "1:"

      "ldmdb r0!,{r1,r3};"
      "subs r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi r1,r1,#0;"         //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;"      //qi*=labs(ilsp[j]-wi)

      "subs r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi r1,r1,#0;"         //labs(ilsp[j+1]-wi)
      "umull %1,r3,r1,%1;"      //pi*=labs(ilsp[j+1]-wi)

      "cmn r2,r3;"              // shift down 16?
      "beq 0f;"
      "add %2,%2,#16;"
      "mov %0,%0,lsr #16;"
      "orr %0,%0,r2,lsl #16;"
      "mov %1,%1,lsr #16;"
      "orr %1,%1,r3,lsl #16;"
      "0:"
      "cmp r0,%3;\n"
      "bhi 1b;\n"

      // odd filter asymmetry
      "ands r0,%5,#1;\n"
      "beq 2f;\n"
      "add r0,%3,%5,lsl#2;\n"

      "ldr r1,[r0,#-4];\n"
      "mov r0,#0x4000;\n"

      "subs r1,r1,%4;\n"        //ilsp[j]-wi
      "rsbmi r1,r1,#0;\n"       //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;\n"    //qi*=labs(ilsp[j]-wi)
      "umull %1,r3,r0,%1;\n"    //pi*=0x4000 (constant for the unpaired root)

      "cmn r2,r3;\n"            // shift down 16?
      "beq 2f;\n"
      "add %2,%2,#16;\n"
      "mov %0,%0,lsr #16;\n"
      "orr %0,%0,r2,lsl #16;\n"
      "mov %1,%1,lsr #16;\n"
      "orr %1,%1,r3,lsl #16;\n"

      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
      //qexp+=shift;

      //}

      /* normalize to max 16 sig figs */
      "2:"
      "mov r2,#0;"
      "orr r1,%0,%1;"
      "tst r1,#0xff000000;"
      "addne r2,r2,#8;"
      "movne r1,r1,lsr #8;"
      "tst r1,#0x00f00000;"
      "addne r2,r2,#4;"
      "movne r1,r1,lsr #4;"
      "tst r1,#0x000c0000;"
      "addne r2,r2,#2;"
      "movne r1,r1,lsr #2;"
      "tst r1,#0x00020000;"
      "addne r2,r2,#1;"
      "movne r1,r1,lsr #1;"
      "tst r1,#0x00010000;"
      "addne r2,r2,#1;"
      "mov %0,%0,lsr r2;"
      "mov %1,%1,lsr r2;"
      "add %2,%2,r2;"

      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}

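/* lsp_norm_asm: shift qi left until bit 15 is its highest set bit,
   decrementing qexp by the shift count (normalizes a value known to
   fit in 16 bits after lsp_loop_asm). */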
static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst %0,#0x0000ff00;"
      "moveq %0,%0,lsl #8;"
      "subeq %1,%1,#8;"
      "tst %0,#0x0000f000;"
      "moveq %0,%0,lsl #4;"
      "subeq %1,%1,#4;"
      "tst %0,#0x0000c000;"
      "moveq %0,%0,lsl #2;"
      "subeq %1,%1,#2;"
      "tst %0,#0x00008000;"
      "moveq %0,%0,lsl #1;"
      "subeq %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");
  *qip=qi;
  *qexpp=qexp;
}

#endif
#endif