@ Tremolo library
@ Copyright (C) 2009 Robin Watts for Pinknoise Productions Ltd

	.text

	@ low accuracy version

	.global	mdct_backwardARM
	.global	mdct_shift_right
	.global	mdct_unroll_prelap
	.global	mdct_unroll_part2
	.global	mdct_unroll_part3
	.global	mdct_unroll_postlap

	.extern	sincos_lookup0
	.extern	sincos_lookup1

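@ Note: throughout this file, results are clamped to the 16 bit range
@ before each STRH with a three-instruction idiom (r4 = ~0x8000):
@	MOV	r14,rX, ASR #15
@	TEQ	r14,r14,ASR #31		@ r14 is 0 or -1 iff rX already fits
@	EORNE	rX, r4, r14,ASR #31
@ A rough C equivalent (a sketch only, assuming 32 bit ints) is:
@	if ((x >> 15) != (x >> 31))	/* outside [-32768,32767]? */
@		x = (x >> 31) ^ 0x7FFF;	/* -> -32768 or 32767 */
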
mdct_unroll_prelap:
	@ r0 = out
	@ r1 = post
	@ r2 = r
	@ r3 = step
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r2, r1		@ r1 = r - post
	SUBS	r1, r1, #16		@ r1 = r - post - 16
	BLT	unroll_over
unroll_loop:
	LDMDB	r2!,{r5,r6,r7,r12}

	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r6, r6, ASR #9		@ r6 = (*--r)>>9
	MOV	r7, r7, ASR #9		@ r7 = (*--r)>>9
	MOV	r12,r12,ASR #9		@ r12= (*--r)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop

unroll_over:
	ADDS	r1, r1, #16
	BLE	unroll_end
unroll_loop2:
	LDR	r5,[r2,#-4]!
	@ stall
	@ stall (Xscale)
	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop2
unroll_end:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_postlap:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = step
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r1, r2		@ r1 = post - l
	MOV	r1, r1, ASR #1		@ r1 = (post - l)>>1
	SUBS	r1, r1, #16		@ r1 = ((post - l)>>1) - 16
	BLT	unroll_over3
unroll_loop3:
	LDR	r12,[r2],#8
	LDR	r7, [r2],#8
	LDR	r6, [r2],#8
	LDR	r5, [r2],#8

	RSB	r12,r12,#0
	RSB	r5, r5, #0
	RSB	r6, r6, #0
	RSB	r7, r7, #0

	MOV	r12, r12,ASR #9		@ r12= (-*l)>>9
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r6, r6, ASR #9		@ r6 = (-*l)>>9
	MOV	r7, r7, ASR #9		@ r7 = (-*l)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop3

unroll_over3:
	ADDS	r1, r1, #16
	BLE	unroll_over4
unroll_loop4:
	LDR	r5,[r2], #8
	@ stall
	@ stall (Xscale)
	RSB	r5, r5, #0
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop4
unroll_over4:
	LDMFD	r13!,{r4-r7,PC}

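@ mdct_unroll_part2 and mdct_unroll_part3 below apply the overlap
@ window to the lapped halves. Per output sample, part2 computes
@ (roughly, as a C sketch; CLIP is the saturation idiom above, and
@ the window bytes wL/wR scale the 8 bit reduced-precision samples):
@	*out = CLIP((((*l)>>8) * *wL++ + ((*--r)>>8) * *--wR) >> 9);
@	out += step;
@ while part3 computes the differenced form
@	*out = CLIP((((*r++)>>8) * *--wR - ((*l)>>8) * *wL++) >> 9);
@ with l moving by two words per sample (down in part2, up in part3).
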
mdct_unroll_part2:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r3, r1		@ r1 = (r - post)
	BLE	unroll_over5
unroll_loop5:
	LDR	r12,[r2, #-8]!		@ r12= *l       (but l -= 2 first)
	LDR	r7, [r3, #-4]!		@ r7 = *--r
	LDRB	r6, [r10,#-1]!		@ r6 = *--wR
	LDRB	r11,[r9],#1		@ r11= *wL++

	MOV	r12, r12, ASR #8
	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	MUL	r11,r12,r11		@ r11 = *l * *wL++
	MOV	r7, r7, ASR #8
	MLA	r6, r7, r6, r11		@ r6 = (*--r * *--wR) + (*l * *wL++)
	MOV	r6, r6, ASR #9
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop5

unroll_over5:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_unroll_part3:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r1, r3		@ r1 = (post - r)
	BLE	unroll_over6
unroll_loop6:
	LDR	r12,[r2],#8		@ r12= *l       (l += 2 afterwards)
	LDR	r7, [r3],#4		@ r7 = *r++
	LDRB	r11,[r9],#1		@ r11= *wL++
	LDRB	r6, [r10,#-1]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	MOV	r12,r12,ASR #8
	MUL	r11,r12,r11		@ r11 = *l * *wL++
	MOV	r7, r7, ASR #8
	MUL	r6, r7, r6		@ r6  = *r++ * *--wR
	SUB	r6, r6, r11
	MOV	r6, r6, ASR #9
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop6

unroll_over6:
	LDMFD	r13!,{r4,r6-r11,PC}

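@ mdct_shift_right copies every other input word. A rough C sketch
@ (n being the value passed in r0, as elsewhere in this file):
@	for (i = 0; i < n>>2; i++)
@		right[i] = in[(i<<1) + 1];
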
mdct_shift_right:
	@ r0 = n
	@ r1 = in
	@ r2 = right
	STMFD	r13!,{r4-r11,r14}

	MOV	r0, r0, LSR #2		@ n >>= 2
	ADD	r1, r1, #4

	SUBS	r0, r0, #8
	BLT	sr_less_than_8
sr_loop:
	LDR	r3, [r1], #8
	LDR	r4, [r1], #8
	LDR	r5, [r1], #8
	LDR	r6, [r1], #8
	LDR	r7, [r1], #8
	LDR	r8, [r1], #8
	LDR	r12,[r1], #8
	LDR	r14,[r1], #8
	SUBS	r0, r0, #8
	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
	BGE	sr_loop
sr_less_than_8:
	ADDS	r0, r0, #8
	BEQ	sr_end
sr_loop2:
	LDR	r3, [r1], #8
	SUBS	r0, r0, #1
	STR	r3, [r2], #4
	BGT	sr_loop2
sr_end:
	LDMFD	r13!,{r4-r11,PC}

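@ mdct_backwardARM performs the inverse MDCT in the usual Tremolo
@ order: presymmetry, butterflies (generic stages, then unrolled
@ 32-sample blocks), bit reversal, and the final step7/step8
@ rotations; it corresponds to mdct_backward in the C sources. This
@ low accuracy version keeps sincos_lookup0/1 as byte tables and
@ pre-shifts operands with ASR #8/#9, so plain MUL/MLA can be used.
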
mdct_backwardARM:
	@ r0 = n
	@ r1 = in
	STMFD	r13!,{r4-r11,r14}

	MOV	r2, #1<<4		@ r2 = 1<<shift
	MOV	r3, #13-4		@ r3 = 13-shift
find_shift_loop:
	TST	r0, r2			@ if (n & (1<<shift)) == 0
	MOV	r2, r2, LSL #1
	SUBEQ	r3, r3, #1		@ shift--
	BEQ	find_shift_loop
	MOV	r2, #2
	MOV	r2, r2, LSL r3		@ r2 = step = 2<<shift

	@ presymmetry
	@ r0 = n (a multiple of 4)
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	ADD	r14,r1, r0		@ r14= in+(n>>2)
	SUB	r4, r4, #3*4		@ r4 = aX = in+(n>>1)-3
	LDR	r5, =sincos_lookup0	@ r5 = T = sincos_lookup0

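@ The XPROD31/XNPROD31 comments below refer to the fixed point helper
@ macros in the C sources, which compute, in effect:
@	XPROD31 (a, b, t, v, x, y):  *x = a*t + b*v;  *y = b*t - a*v;
@	XNPROD31(a, b, t, v, x, y):  *x = a*t - b*v;  *y = b*t + a*v;
@ (each product nominally taken >>31). Here the inputs are already
@ reduced to 8 bit precision, so the products fit a plain 32 bit MUL.
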
presymmetry_loop1:
	LDR	r7, [r4,#8]		@ r7 = s2 = aX[2]
	LDRB	r11,[r5,#1]		@ r11= T[1]
	LDR	r6, [r4],#-16		@ r6 = s0 = aX[0]
	LDRB	r10,[r5],r2		@ r10= T[0]   T += step
	MOV	r6, r6, ASR #8
	MOV	r7, r7, ASR #8

	@ XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
	MUL	r9, r6, r10		@ r9   = s0*T[0]
	RSB	r6, r6, #0
	MLA	r9, r7, r11,r9		@ r9  += s2*T[1]
	CMP	r4, r14
	MUL	r12,r7, r10		@ r12  = s2*T[0]
	STR	r9, [r4,#16]		@ aX[0] = r9
	MLA	r12,r6, r11,r12		@ r12 -= s0*T[1]
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop1	@ while (aX >= in+n4)

presymmetry_loop2:
	LDR	r6, [r4],#-16		@ r6 = s0 = aX[0]
	LDRB	r10,[r5,#1]		@ r10= T[1]
	LDR	r7, [r4,#16+8]		@ r7 = s2 = aX[2]
	LDRB	r11,[r5],-r2		@ r11= T[0]   T -= step
	MOV	r6, r6, ASR #8
	MOV	r7, r7, ASR #8

	@ XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
	MUL	r9, r6, r10		@ r9   = s0*T[1]
	RSB	r6, r6, #0
	MLA	r9, r7, r11,r9		@ r9  += s2*T[0]
	CMP	r4, r1
	MUL	r12,r7, r10		@ r12  = s2*T[1]
	STR	r9, [r4,#16]		@ aX[0] = r9
	MLA	r12,r6, r11,r12		@ r12 -= s0*T[0]
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop2	@ while (aX >= in)

	@ r0 = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	STMFD	r13!,{r3}
	LDR	r5, =sincos_lookup0	@ r5 = T = sincos_lookup0
	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	SUB	r4, r4, #4*4		@ r4 = aX = in+(n>>1)-4
	LDRB	r11,[r5,#1]		@ r11= T[1]
	LDRB	r10,[r5],r2		@ r10= T[0]   T += step
presymmetry_loop3:
	LDR	r8, [r1],#16		@ r8 = ro0 = bX[0]
	LDR	r9, [r1,#8-16]		@ r9 = ro2 = bX[2]
	LDR	r6, [r4],#-16		@ r6 = ri0 = aX[0]
	LDR	r7, [r4,#8+16]		@ r7 = ri2 = aX[2]
	MOV	r8, r8, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r6, r6, ASR #8

	@ XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
	@ aX[0] = (ro2*T[1] - ro0*T[0])>>31   aX[2] = (ro0*T[1] + ro2*T[0])>>31
	MUL	r12,r8, r11		@ r12  = ro0*T[1]
	MOV	r7, r7, ASR #8
	MLA	r12,r9, r10,r12		@ r12 += ro2*T[0]
	RSB	r8, r8, #0		@ r8 = -ro0
	MUL	r3, r9, r11		@ r3   = ro2*T[1]
	LDRB	r11,[r5,#1]		@ r11= T[1]
	MLA	r3, r8, r10,r3		@ r3  -= ro0*T[0]
	LDRB	r10,[r5],r2		@ r10= T[0]   T += step
	STR	r12,[r4,#16+8]
	STR	r3, [r4,#16]

	@ XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
	@ bX[0] = (ri2*T[0] - ri0*T[1])>>31   bX[2] = (ri0*T[0] + ri2*T[1])>>31
	MUL	r12,r6, r10		@ r12  = ri0*T[0]
	RSB	r6, r6, #0		@ r6 = -ri0
	MLA	r12,r7, r11,r12		@ r12 += ri2*T[1]
	CMP	r4, r1
	MUL	r3, r7, r10		@ r3   = ri2*T[0]
	STR	r12,[r1,#8-16]
	MLA	r3, r6, r11,r3		@ r3  -= ri0*T[1]
	STR	r3, [r1,#-16]

	BGE	presymmetry_loop3

	SUB	r1,r1,r0		@ r1 = in -= n>>2 (i.e. restore in)

	LDR	r3,[r13]
	STR	r2,[r13,#-4]!

	@ mdct_butterflies
	@ r0 = n  = (points * 2)
	@ r1 = in = x
	@ r2 = i
	@ r3 = shift
	STMFD	r13!,{r0-r1}
	RSBS	r4,r3,#6		@ r4 = stages = 7-shift then --stages
	LDR	r5,=sincos_lookup0
	BLE	no_generics
	MOV	r14,#4			@ r14= 4               (i=0)
	MOV	r6, r14,LSL r3		@ r6 = (4<<i)<<shift
mdct_butterflies_loop1:
	MOV	r0, r0, LSR #1		@ r0 = points>>i = POINTS
	MOV	r2, r14,LSR #2		@ r2 = (1<<i)-j        (j=0)
	STMFD	r13!,{r4,r14}
mdct_butterflies_loop2:

	@ mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
	@ mdct_butterfly_generic(r1, r0, r6)
	@ r0 = points
	@ r1 = x
	@ preserve r2 (external loop counter)
	@ preserve r3
	@ preserve r4 (external loop counter)
	@ r5 = T = sincos_lookup0
	@ r6 = step
	@ preserve r14

	STR	r2,[r13,#-4]!		@ stack r2
	ADD	r1,r1,r0,LSL #1		@ r1 = x2+4 = x + (POINTS>>1)
	ADD	r7,r1,r0,LSL #1		@ r7 = x1+4 = x + POINTS
	ADD	r12,r5,#1024		@ r12= sincos_lookup0+1024

mdct_bufferfly_generic_loop1:
	LDMDB	r7!,{r2,r3,r8,r11}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r8 = x1[2]
					@ r11= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r9,r10,r14}	@ r4 = x2[0]
					@ r9 = x2[1]
					@ r10= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r11,r11,r8		@ r11= s1 = x1[3] - x1[2]
	ADD	r8, r11,r8, LSL #1	@ r8 =      x1[3] + x1[2] (-> x1[2])
	SUB	r9, r9, r4		@ r9 = s2 = x2[1] - x2[0]
	ADD	r4, r9, r4, LSL #1	@ r4 =      x2[1] + x2[0] (-> x1[1])
	SUB	r14,r14,r10		@ r14= s3 = x2[3] - x2[2]
	ADD	r10,r14,r10,LSL #1	@ r10=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r4,r8,r10}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 free
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s2
	@ r10 free
	@ r11= s1
	@ r12= limit
	@ r14= s3

	LDRB	r8, [r5,#1]		@ r8 = T[1]
	LDRB	r10,[r5],r6		@ r10= T[0]   T += step
	MOV	r2, r2, ASR #8
	MOV	r11,r11,ASR #8
	MOV	r9, r9, ASR #8
	MOV	r14,r14,ASR #8

	@ XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
	@ stall Xscale
	MUL	r3, r2, r8		@ r3   = s0*T[1]
	MLA	r3, r11,r10,r3		@ r3  += s1*T[0]
	RSB	r11,r11,#0
	MUL	r4, r8, r11		@ r4   = -s1*T[1]
	MLA	r4, r2, r10,r4		@ r4  += s0*T[0] = Value for x2[2]
	MOV	r2, r3			@ r2 = r3 = Value for x2[0]

	@ XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
	MUL	r3, r9, r10		@ r3   = s2*T[0]
	MLA	r3, r14,r8, r3		@ r3  += s3*T[1] = Value for x2[1]
	RSB	r9, r9, #0
	MUL	r11,r14,r10		@ r11  = s3*T[0]
	MLA	r11,r9, r8, r11		@ r11 -= s2*T[1] = Value for x2[3]
	CMP	r5, r12

	STMIA	r1,{r2,r3,r4,r11}

	BLT	mdct_bufferfly_generic_loop1

	SUB	r12,r12,#1024
mdct_bufferfly_generic_loop2:
	LDMDB	r7!,{r2,r3,r9,r10}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r9 = x1[2]
					@ r10= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r8,r11,r14}	@ r4 = x2[0]
					@ r8 = x2[1]
					@ r11= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r9, r9,r10		@ r9 = s1 = x1[2] - x1[3]
	ADD	r10,r9,r10, LSL #1	@ r10=      x1[2] + x1[3] (-> x1[2])
	SUB	r4, r4, r8		@ r4 = s2 = x2[0] - x2[1]
	ADD	r8, r4, r8, LSL #1	@ r8 =      x2[0] + x2[1] (-> x1[1])
	SUB	r14,r14,r11		@ r14= s3 = x2[3] - x2[2]
	ADD	r11,r14,r11,LSL #1	@ r11=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r8,r10,r11}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 = s2
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s1
	@ r10 free
	@ r11 free
	@ r12= limit
	@ r14= s3

	LDRB	r8, [r5,#1]		@ r8 = T[1]
	LDRB	r10,[r5],-r6		@ r10= T[0]   T -= step
	MOV	r2, r2, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r4, r4, ASR #8
	MOV	r14,r14,ASR #8

	@ XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
	@ stall Xscale
	MUL	r11,r2, r8		@ r11  = s0*T[1]
	MLA	r11,r9, r10,r11		@ r11 += s1*T[0]
	RSB	r9, r9, #0
	MUL	r2, r10,r2		@ r2   = s0*T[0]
	MLA	r2, r9, r8, r2		@ r2  += -s1*T[1] = Value for x2[0]
	MOV	r9, r11			@ r9 = r11 = Value for x2[2]

	@ XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
	MUL	r11,r4, r10		@ r11  = s2*T[0]
	MLA	r11,r14,r8, r11		@ r11 += s3*T[1] = Value for x2[3]
	RSB	r4, r4, #0
	MUL	r3, r14,r10		@ r3   = s3*T[0]
	MLA	r3, r4, r8, r3		@ r3  -= s2*T[1] = Value for x2[1]
	CMP	r5, r12

	STMIA	r1,{r2,r3,r9,r11}

	BGT	mdct_bufferfly_generic_loop2

	LDR	r2,[r13],#4		@ unstack r2
	ADD	r1, r1, r0, LSL #2	@ r1 = x+POINTS*j
	@ stall Xscale
	SUBS	r2, r2, #1		@ r2-- (j++)
	BGT	mdct_butterflies_loop2

	LDMFD	r13!,{r4,r14}

	LDR	r1,[r13,#4]

	SUBS	r4, r4, #1		@ stages--
	MOV	r14,r14,LSL #1		@ r14= 4<<i            (i++)
	MOV	r6, r6, LSL #1		@ r6 = step <<= 1      (i++)
	BGE	mdct_butterflies_loop1
	LDMFD	r13,{r0-r1}

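@ With the generic stages done (or skipped entirely for small n),
@ the last two stages are fully unrolled below: each 32-sample block
@ gets one butterfly_32, which decomposes into two butterfly_16s,
@ each of which ends in a pair of butterfly_8s, mirroring the
@ mdct_butterfly_* call tree of the C implementation.
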
no_generics:
	@ mdct_butterflies part2 (loop around mdct_butterfly_32)
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

mdct_bufferflies_loop3:
	@ mdct_butterfly_32

	@ block1
	ADD	r4, r1, #16*4		@ r4 = &in[16]
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[16]
					@ r6 = x[17]
					@ r9 = x[18]
					@ r10= x[19]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[16] - x[17]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[16] + x[17]  -> x[16]
	SUB	r9, r9, r10		@ r9 = s1 = x[18] - x[19]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[18] + x[19]  -> x[18]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[17]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[19]
	STMIA	r4!,{r6,r7,r10,r11}

	MOV	r6,#0xed		@ r6 = cPI1_8
	MOV	r7,#0x62		@ r7 = cPI3_8

	MOV	r5, r5, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r8, r8, ASR #8
	MOV	r12,r12,ASR #8

	@ XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
	@ x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
	@ stall Xscale
	MUL	r11,r5, r6		@ r11  = s0*cPI1_8
	MLA	r11,r9, r7, r11		@ r11 += s1*cPI3_8
	RSB	r9, r9, #0
	MUL	r5, r7, r5		@ r5   = s0*cPI3_8
	MLA	r5, r9, r6, r5		@ r5  -= s1*cPI1_8

	@ XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
	@ x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
	MUL	r9, r8, r6		@ r9   = s2*cPI1_8
	MLA	r9, r12,r7, r9		@ r9  += s3*cPI3_8
	RSB	r8,r8,#0
	MUL	r12,r6, r12		@ r12  = s3*cPI1_8
	MLA	r12,r8, r7, r12		@ r12 -= s2*cPI3_8
	STMIA	r1!,{r5,r9,r11,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[20]
					@ r6 = x[21]
					@ r9 = x[22]
					@ r10= x[23]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[4]
					@ r8 = x[5]
					@ r11= x[6]
					@ r12= x[7]
	SUB	r5, r5, r6		@ r5 = s0 = x[20] - x[21]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[20] + x[21]  -> x[20]
	SUB	r9, r9, r10		@ r9 = s1 = x[22] - x[23]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[22] + x[23]  -> x[22]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 5] - x[ 4]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 5] + x[ 4]  -> x[21]
	SUB	r12,r12,r11		@ r12= s3 = x[ 7] - x[ 6]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[23]
	MOV	r14,#0xb5		@ r14= cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5 = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8 = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9 = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12= (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block3
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[24]
					@ r6 = x[25]
					@ r9 = x[26]
					@ r10= x[27]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[8]
					@ r8 = x[9]
					@ r11= x[10]
					@ r12= x[11]
	SUB	r5, r5, r6		@ r5 = s0 = x[24] - x[25]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[24] + x[25]  -> x[24]
	SUB	r9, r9, r10		@ r9 = s1 = x[26] - x[27]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[26] + x[27]  -> x[26]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 9] - x[ 8]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 9] + x[ 8]  -> x[25]
	SUB	r12,r12,r11		@ r12= s3 = x[11] - x[10]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[11] + x[10]  -> x[27]
	STMIA	r4!,{r6,r7,r10,r11}

	MOV	r6,#0x62		@ r6 = cPI3_8
	MOV	r7,#0xED		@ r7 = cPI1_8

	@ XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
	@ x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
	@ stall Xscale
	MOV	r5, r5, ASR #8
	MUL	r11,r5, r6		@ r11  = s0*cPI3_8
	MOV	r9, r9, ASR #8
	MLA	r11,r9, r7, r11		@ r11 += s1*cPI1_8
	RSB	r9, r9, #0
	MUL	r5, r7, r5		@ r5   = s0*cPI1_8
	MLA	r5, r9, r6, r5		@ r5  -= s1*cPI3_8

	@ XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
	@ x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
	MOV	r8, r8, ASR #8
	MUL	r9, r8, r6		@ r9   = s2*cPI3_8
	MOV	r12,r12,ASR #8
	MLA	r9, r12,r7, r9		@ r9  += s3*cPI1_8
	RSB	r8,r8,#0
	MUL	r12,r6, r12		@ r12  = s3*cPI3_8
	MLA	r12,r8, r7, r12		@ r12 -= s2*cPI1_8
	STMIA	r1!,{r5,r9,r11,r12}

	@ block4
	LDMIA	r4,{r5,r6,r10,r11}	@ r5 = x[28]
					@ r6 = x[29]
					@ r10= x[30]
					@ r11= x[31]
	LDMIA	r1,{r8,r9,r12,r14}	@ r8 = x[12]
					@ r9 = x[13]
					@ r12= x[14]
					@ r14= x[15]
	SUB	r5, r5, r6		@ r5 = s0 = x[28] - x[29]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[28] + x[29]  -> x[28]
	SUB	r7, r14,r12		@ r7 = s3 = x[15] - x[14]
	ADD	r12,r7, r12, LSL #1	@ r12=      x[15] + x[14]  -> x[31]
	SUB	r10,r10,r11		@ r10= s1 = x[30] - x[31]
	ADD	r11,r10,r11,LSL #1	@ r11=      x[30] + x[31]  -> x[30]
	SUB	r14, r8, r9		@ r14= s2 = x[12] - x[13]
	ADD	r9, r14, r9, LSL #1	@ r9 =      x[12] + x[13]  -> x[29]
	STMIA	r4!,{r6,r9,r11,r12}
	STMIA	r1!,{r5,r7,r10,r14}

	@ mdct_butterfly16 (1st version)
	@ block 1
	SUB	r1,r1,#16*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	MOV	r14,#0xB5		@ r14= cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5 = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8 = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9 = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12= (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r12,r9, r10		@ r12= s3 = x[14] - x[15]
	ADD	r10,r12,r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r12,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly16 (2nd version)
	@ block 1
	ADD	r1,r1,#16*4-8*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	MOV	r14,#0xb5		@ r14= cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5 = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8 = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9 = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12= (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r9, r9, r10		@ r9 = s3 = x[14] - x[15]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r9,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	ADD	r1,r1,#8*4
	SUBS	r0,r0,#64
	BGT	mdct_bufferflies_loop3

	LDMFD	r13,{r0-r3}

mdct_bitreverseARM:
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	MOV	r4, #0			@ r4 = bit = 0
	ADD	r5, r1, r0, LSL #1	@ r5 = w = x + (n>>1)
	ADR	r6, bitrev
	SUB	r3, r3, #2		@ r3 = shift -= 2
	SUB	r5, r5, #8
brev_lp:
	LDRB	r7, [r6, r4, LSR #6]
	AND	r8, r4, #0x3f
	LDRB	r8, [r6, r8]
	ADD	r4, r4, #1		@ bit++
	@ stall XScale
	ORR	r7, r7, r8, LSL #6	@ r7 = bitrev[bit]
	ADD	r9, r1, r7, LSR r3	@ r9 = xx = x + (b>>shift)
	CMP	r5, r9			@ if (w > xx)
	LDR	r10,[r5],#-8		@   r10 = w[0]          w -= 2
	LDRGT	r11,[r5,#12]		@   r11 = w[1]
	LDRGT	r12,[r9]		@   r12 = xx[0]
	LDRGT	r14,[r9,#4]		@   r14 = xx[1]
	STRGT	r10,[r9]		@   xx[0]= w[0]
	STRGT	r11,[r9,#4]		@   xx[1]= w[1]
	STRGT	r12,[r5,#8]		@   w[0] = xx[0]
	STRGT	r14,[r5,#12]		@   w[1] = xx[1]
	CMP	r5,r1
	BGT	brev_lp

	@ mdct_step7
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift-2

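@ step7 combines symmetric pairs: w0 walks up from the start of the
@ buffer while w1 walks down from the halfway point, rotating each
@ pair by a sincos twiddle. T runs forward to the top of the quarter
@ table in step7_loop1, then back down in step7_loop2 with the two
@ table bytes used in swapped roles, exploiting the quarter-wave
@ symmetry of the lookup table.
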
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@          sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@          sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = w1 = x + (n>>1)
	ADDGE	r5, r5, r2, LSR #1	@          (step>>1)
	ADD	r8, r5, #1024		@ r8 = Ttop
step7_loop1:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]    w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDRB	r14,[r5,#1]		@ r14= T[1]
	LDRB	r12,[r5],r2		@ r12= T[0]     T += step

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	MOV	r6, r6, ASR #9
	MUL	r3, r6, r14		@ r3  = s0*T[1]
	MOV	r11,r11,ASR #9
	MUL	r4, r11,r12		@ r4  = s1*T[0]
	ADD	r3, r3, r4		@ r3  = s2 = s0*T[1] + s1*T[0]
	MUL	r14,r11,r14		@ r14 = s1*T[1]
	MUL	r12,r6, r12		@ r12 = s0*T[0]
	SUB	r14,r14,r12		@ r14 = s3 = s1*T[1] - s0*T[0]

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3 - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3 + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r5,r8
	BLT	step7_loop1

step7_loop2:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]    w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDRB	r14,[r5,-r2]!		@ r14= T[0]     T -= step
	LDRB	r12,[r5,#1]		@ r12= T[1]

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	MOV	r6, r6, ASR #9
	MUL	r3, r6, r14		@ r3  = s0*T[0]
	MOV	r11,r11,ASR #9
	MUL	r4, r11,r12		@ r4  = s1*T[1]
	ADD	r3, r3, r4		@ r3  = s2 = s0*T[0] + s1*T[1]
	MUL	r14,r11,r14		@ r14 = s1*T[0]
	MUL	r12,r6, r12		@ r12 = s0*T[1]
	SUB	r14,r14,r12		@ r14 = s3 = s1*T[0] - s0*T[1]

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3 - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3 + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r1,r7
	BLT	step7_loop2

	LDMFD	r13!,{r0-r3}

	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	MOV	r2, r2, ASR #2		@ r2 = step >>= 2
	CMP	r2, #0
	CMPNE	r2, #1
	BEQ	mdct_end

	@ step > 1 (default case)
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@          sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@          sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = iX = x + (n>>1)
	ADDGE	r5, r5, r2, LSR #1	@          (step>>1)
mdct_step8_default:
	LDR	r6, [r1],#4		@ r6 =  s0 = x[0]
	LDR	r8, [r1],#4		@ r8 = -s1 = x[1]
	LDRB	r12,[r5,#1]		@ r12= T[1]
	LDRB	r14,[r5],r2		@ r14= T[0]     T += step
	RSB	r8, r8, #0		@ r8 = s1

	@ XPROD31(s0, s1, T[0], T[1], x, x+1)
	@ x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
	MOV	r6, r6, ASR #8
	MOV	r8, r8, ASR #8
	MUL	r10,r8, r12		@ r10  = s1 * T[1]
	CMP	r1, r7
	MLA	r10,r6, r14,r10		@ r10 += s0 * T[0]
	RSB	r6, r6, #0		@ r6 = -s0
	MUL	r11,r8, r14		@ r11  = s1 * T[0]
	MLA	r11,r6, r12,r11		@ r11 -= s0 * T[1]
	STR	r10,[r1,#-8]
	STR	r11,[r1,#-4]
	BLT	mdct_step8_default

mdct_end:
	MOV	r0, r2
	LDMFD	r13!,{r4-r11,PC}

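@ bitrev holds the bit-reverse of 0..63 within 6 bits; brev_lp above
@ pastes two lookups together to reverse up to 12 bits:
@	b = bitrev[bit>>6] | (bitrev[bit & 0x3f] << 6);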
bitrev:
	.byte	0, 32, 16, 48,  8, 40, 24, 56
	.byte	4, 36, 20, 52, 12, 44, 28, 60
	.byte	2, 34, 18, 50, 10, 42, 26, 58
	.byte	6, 38, 22, 54, 14, 46, 30, 62
	.byte	1, 33, 17, 49,  9, 41, 25, 57
	.byte	5, 37, 21, 53, 13, 45, 29, 61
	.byte	3, 35, 19, 51, 11, 43, 27, 59
	.byte	7, 39, 23, 55, 15, 47, 31, 63

	@ END