/*
 * Copyright (c) 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/cpu-features.h>
#include <machine/asm.h>

#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */

.syntax unified

#if defined (__thumb__)
        .thumb
        .thumb_func
#endif

ENTRY(strcmp)
        /* Use LDRD whenever possible. */

/* The main thing to look out for when comparing large blocks is that
   the loads do not cross a page boundary when loading past the index
   of the byte with the first difference or the first string-terminator.

   For example, if the strings are identical and the string-terminator
   is at index k, byte by byte comparison will not load beyond address
   s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
   k; double word - up to 7 bytes.  If the load of these bytes crosses
   a page boundary, it might cause a memory fault (if the page is not
   mapped) that would not have happened in byte by byte comparison.

   If an address is (double) word aligned, then a load of a (double) word
   from that address will not cross a page boundary.
   Therefore, the algorithm below considers word and double-word alignment
   of strings separately. */
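
/* Concretely: with 4 KB pages, a word load from a 4-byte-aligned address
   touches only bytes addr..addr+3, all within one page, and an 8-byte
   aligned LDRD likewise stays within one page.  An unaligned word load
   from, say, offset 0xffe of a page would touch the first two bytes of
   the next page, which may be unmapped. */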

/* High-level description of the algorithm.

   * The fast path: if both strings are double-word aligned,
     use LDRD to load two words from each string in every loop iteration.
   * If the strings have the same offset from a word boundary,
     use LDRB to load and compare byte by byte until
     the first string is aligned to a word boundary (at most 3 bytes).
     This is optimized for quick return on short unaligned strings.
   * If the strings have the same offset from a double-word boundary,
     use LDRD to load two words from each string in every loop iteration,
     as in the fast path.
   * If the strings do not have the same offset from a double-word boundary,
     load a word from the second string before the loop to initialize the
     queue.  Use LDRD to load two words from each string in every loop
     iteration.  Inside the loop, load the second word from the second
     string only after comparing the first word, using the queued value,
     to guarantee safety across page boundaries.
   * If the strings do not have the same offset from a word boundary,
     use LDR and a shift queue.  Order of loads and comparisons matters,
     similarly to the previous case; see the worked example in miscmp_word
     below.

   * Use UADD8 and SEL to compare words, and use REV and CLZ to compute
     the return value.
   * The only difference between ARM and Thumb modes is the use of the CBZ
     instruction.
   * The only difference between big and little endian is the use of REV
     in little endian to compute the return value, instead of MOV.
*/
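
/* For reference, the routine returns a value with the same sign as this
   minimal C sketch (an illustration only, not part of the build):

       int strcmp(const char *s1, const char *s2) {
           const unsigned char *p1 = (const unsigned char *)s1;
           const unsigned char *p2 = (const unsigned char *)s2;
           unsigned char c1, c2;
           do {
               c1 = *p1++;           // next byte of the first string
               c2 = *p2++;           // next byte of the second string
           } while (c1 == c2 && c1 != 0);
           return c1 - c2;           // <0, 0 or >0
       }

   Everything below optimizes this loop by comparing 4 or 8 bytes at a
   time whenever alignment guarantees the wide loads are safe. */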

        .macro m_cbz reg label
#ifdef __thumb2__
        cbz     \reg, \label
#else /* not defined __thumb2__ */
        cmp     \reg, #0
        beq     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbz */

        .macro m_cbnz reg label
#ifdef __thumb2__
        cbnz    \reg, \label
#else /* not defined __thumb2__ */
        cmp     \reg, #0
        bne     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbnz */

        .macro init
        /* Macro to save temporary registers and prepare magic values. */
        .save   {r4-r7}
        subs    sp, sp, #16
        .cfi_def_cfa_offset 16
        strd    r4, r5, [sp, #8]
        .cfi_rel_offset r4, 8
        .cfi_rel_offset r5, 12
        strd    r6, r7, [sp]
        .cfi_rel_offset r6, 0
        .cfi_rel_offset r7, 4
        mvn     r6, #0 /* all F */
        mov     r7, #0 /* all 0 */
        .endm /* init */
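
        /* After init, r6 = 0xffffffff and r7 = 0x00000000; they persist
           through the loops as the fixed operands of the UADD8/SEL
           zero-byte test in magic_find_zero_bytes, and as all-ones/all-zero
           masks elsewhere. */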

        .macro magic_compare_and_branch w1 w2 label
        /* Macro to compare registers w1 and w2 and conditionally branch to
           label: the branch is taken when w1 != w2 or when w1 contains a
           zero byte, i.e., whenever this pair of words decides the result. */
        cmp     \w1, \w2 /* Are w1 and w2 the same? */
        magic_find_zero_bytes \w1
        it      eq
        cmpeq   ip, #0 /* Is there a zero byte in w1? */
        bne     \label
        .endm /* magic_compare_and_branch */

        .macro magic_find_zero_bytes w1
        /* Macro to find all-zero bytes in w1; the result is in ip. */
#if (defined (__ARM_FEATURE_DSP))
        uadd8   ip, \w1, r6
        sel     ip, r7, r6
#else /* not defined (__ARM_FEATURE_DSP) */
        /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
           Coincidentally, these processors only have Thumb-2 mode, where we
           can use the (large) magic constant directly as an immediate in
           instructions.  Note that we cannot use that constant in ARM mode,
           where we would need to create it in a register first. */
        sub     ip, \w1, #0x01010101
        bic     ip, ip, \w1
        and     ip, ip, #0x80808080
#endif /* not defined (__ARM_FEATURE_DSP) */
        .endm /* magic_find_zero_bytes */
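
        /* Worked example of the zero-byte test, for w1 = 0x12003456
           (little-endian; byte 2 of the word is zero):

           DSP path:  UADD8 adds 0xff to every byte of w1; a byte produces
                      a carry-out (GE bit set) exactly when it is non-zero.
                      SEL then picks 0x00 from r7 for non-zero bytes and
                      0xff from r6 for zero bytes, giving ip = 0x00ff0000.

           non-DSP:   (w1 - 0x01010101) & ~w1 & 0x80808080
                      = 0x10ff3355 & 0xedffcba9 & 0x80808080
                      = 0x00800000 - the sign bit of the zero byte.

           Either way, ip is non-zero iff w1 contains a zero byte, with the
           marker's top bit in the same position for the CLZ step below. */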

        .macro setup_return w1 w2
#ifdef __ARMEB__
        mov     r1, \w1
        mov     r2, \w2
#else /* not __ARMEB__ */
        rev     r1, \w1
        rev     r2, \w2
#endif /* not __ARMEB__ */
        .endm /* setup_return */

        .cfi_startproc
        pld     [r0, #0]
        pld     [r1, #0]

        /* Are both strings double-word aligned? */
        orr     ip, r0, r1
        tst     ip, #7
        bne     do_align

        /* Fast path. */
        init

doubleword_aligned:

        /* Get here when the strings to compare are double-word aligned. */
        /* Compare two words in every iteration. */
        .p2align 2
2:
        pld     [r0, #16]
        pld     [r1, #16]

        /* Load the next double-word from each string. */
        ldrd    r2, r3, [r0], #8
        ldrd    r4, r5, [r1], #8

        magic_compare_and_branch w1=r2, w2=r4, label=return_24
        magic_compare_and_branch w1=r3, w2=r5, label=return_35
        b       2b

do_align:
        /* Is the first string word-aligned? */
        ands    ip, r0, #3
        beq     word_aligned_r0

        /* Fast compare byte by byte until the first string is word-aligned. */
        /* The offset of r0 from a word boundary is in ip.  Thus, the number
           of bytes to read until the next word boundary is 4-ip. */
        bic     r0, r0, #3
        ldr     r2, [r0], #4
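        /* Dispatch on ip (the offset: 1, 2, or 3): LSLS #31 shifts bit 0 of
           ip into the result's sign bit (clearing Z when it is set) and
           bit 1 into the carry flag:
             ip = 1 -> NE, CC: fall through to byte1 (compare 3 more bytes),
             ip = 2 -> EQ:     branch to byte2 (compare 2 more bytes),
             ip = 3 -> NE, CS: branch to byte3 (compare 1 more byte). */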
        lsls    ip, ip, #31
        beq     byte2
        bcs     byte3

byte1:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE1_OFFSET
        subs    ip, r3, ip
        bne     fast_return
        m_cbz   reg=r3, label=fast_return

byte2:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE2_OFFSET
        subs    ip, r3, ip
        bne     fast_return
        m_cbz   reg=r3, label=fast_return

byte3:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE3_OFFSET
        subs    ip, r3, ip
        bne     fast_return
        m_cbnz  reg=r3, label=word_aligned_r0

fast_return:
        mov     r0, ip
        bx      lr

word_aligned_r0:
        init
        /* The first string is word-aligned. */
        /* Is the second string word-aligned? */
        ands    ip, r1, #3
        bne     strcmp_unaligned

word_aligned:
        /* The strings are word-aligned. */
        /* Is the first string double-word aligned? */
        tst     r0, #4
        beq     doubleword_aligned_r0

        /* If r0 is not double-word aligned yet, align it by loading
           and comparing the next word from each string. */
        ldr     r2, [r0], #4
        ldr     r4, [r1], #4
        magic_compare_and_branch w1=r2 w2=r4 label=return_24

doubleword_aligned_r0:
        /* Get here when r0 is double-word aligned. */
        /* Is r1 double-word aligned? */
        tst     r1, #4
        beq     doubleword_aligned

        /* Get here when the strings to compare are word-aligned,
           r0 is double-word aligned, but r1 is not double-word aligned. */

        /* Initialize the queue. */
        ldr     r5, [r1], #4

        /* Compare two words in every iteration. */
        .p2align 2
3:
        pld     [r0, #16]
        pld     [r1, #16]

        /* Load the next double-word from each string and compare. */
        ldrd    r2, r3, [r0], #8
        magic_compare_and_branch w1=r2 w2=r5 label=return_25
        ldrd    r4, r5, [r1], #8
        magic_compare_and_branch w1=r3 w2=r4 label=return_34
        b       3b

        .macro miscmp_word offsetlo offsethi
        /* Macro to compare misaligned strings. */
        /* r0, r1 are word-aligned, and at least one of the strings
           is not double-word aligned. */
        /* Compare one word in every loop iteration. */
        /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
           OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
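
        /* Worked example (little-endian), offsetlo=8/offsethi=24, i.e. r1
           originally 1 byte past a word boundary: the shift queue r5 holds
           the last word loaded from the second string.  LSR #8 drops the
           byte consumed by the previous iteration, leaving three useful
           bytes in bits 0-23; the low byte of the next loaded word, moved
           to bits 24-31 by LSL #24, supplies the fourth.  The two pieces
           are compared in that order, so the next word of the second string
           is loaded only after the first three bytes are known to match. */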

        /* Initialize the shift queue. */
        ldr     r5, [r1], #4

        /* Compare one word from each string in every loop iteration. */
        .p2align 2
7:
        ldr     r3, [r0], #4
        S2LOMEM r5, r5, #\offsetlo
        magic_find_zero_bytes w1=r3
        cmp     r7, ip, S2HIMEM #\offsetlo
        and     r2, r3, r6, S2LOMEM #\offsetlo
        it      eq
        cmpeq   r2, r5
        bne     return_25
        ldr     r5, [r1], #4
        cmp     ip, #0
        eor     r3, r2, r3
        S2HIMEM r2, r5, #\offsethi
        it      eq
        cmpeq   r3, r2
        bne     return_32
        b       7b
        .endm /* miscmp_word */

return_32:
        setup_return w1=r3, w2=r2
        b       do_return
return_34:
        setup_return w1=r3, w2=r4
        b       do_return
return_25:
        setup_return w1=r2, w2=r5
        b       do_return
return_35:
        setup_return w1=r3, w2=r5
        b       do_return
return_24:
        setup_return w1=r2, w2=r4

do_return:

#ifdef __ARMEB__
        mov     r0, ip
#else /* not __ARMEB__ */
        rev     r0, ip
#endif /* not __ARMEB__ */

        /* Restore temporaries early, before computing the return value. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        .pad    #-16
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        /* The words in r1 and r2 differ, or r1 contains a zero byte. */
        /* r0 contains a mask of all-zero bytes in r1. */
        /* Using r0 and not ip here because cbz requires a low register. */
        m_cbz   reg=r0, label=compute_return_value
        clz     r0, r0
        /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
        rsb     r0, r0, #24
        /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
        lsr     r1, r1, r0
        lsr     r2, r2, r0
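
        /* Worked example (little-endian): if the terminator is byte 1 of
           the word in string order, the byte-reversed mask in r0 is
           0x00ff0000, CLZ gives 8, and RSB gives 24 - 8 = 16.  Shifting
           both byte-reversed words right by 16 discards the bytes that lie
           past the terminator (their contents are meaningless), leaving the
           first two string bytes in big-endian order so a single unsigned
           compare decides the result. */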

compute_return_value:
        movs    r0, #1
        cmp     r1, r2
        /* The return value is computed as follows.
           If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
           If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
           which means r0:=r0-r0-1 and r0 is #-1 at return.
           If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
           which means r0:=r0-r0 and r0 is #0 at return.
           (C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
        it      ls
        sbcls   r0, r0, r0
        bx      lr

        /* The code from the previous version of strcmp.S handles all of the
         * cases where the first string and second string cannot both be
         * aligned to a word boundary faster than the new algorithm. See
         * bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S for the unedited
         * version of the code.
         */
strcmp_unaligned:
        wp1 .req r0
        wp2 .req r1
        b1  .req r2
        w1  .req r4
        w2  .req r5
        t1  .req ip
        @ r3 is scratch
2:
        mov     b1, #1
        orr     b1, b1, b1, lsl #8
        orr     b1, b1, b1, lsl #16
        /* b1 = 0x01010101, the magic constant for zero-byte detection. */

        and     t1, wp2, #3
        bic     wp2, wp2, #3
        ldr     w1, [wp1], #4
        ldr     w2, [wp2], #4
        cmp     t1, #2
        beq     2f
        bhi     3f

        /* Critical inner Loop: Block with 3 bytes initial overlap */
        .p2align 2
1:
        bic     t1, w1, #MSB
        cmp     t1, w2, S2LOMEM #8
        sub     r3, w1, b1
        bic     r3, r3, w1
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #24
        bne     6f
        ldr     w1, [wp1], #4
        b       1b
4:
        S2LOMEM w2, w2, #8
        b       8f

5:
#ifdef __ARMEB__
        /* The syndrome value may contain false ones if the string ends
         * with the bytes 0x01 0x00.
         */
        tst     w1, #0xff000000
        itt     ne
        tstne   w1, #0x00ff0000
        tstne   w1, #0x0000ff00
        beq     7f
#else
        bics    r3, r3, #0xff000000
        bne     7f
#endif
        ldrb    w2, [wp2]
        S2LOMEM t1, w1, #24
#ifdef __ARMEB__
        lsl     w2, w2, #24
#endif
        b       8f

6:
        S2LOMEM t1, w1, #24
        and     w2, w2, #LSB
        b       8f

        /* Critical inner Loop: Block with 2 bytes initial overlap */
        .p2align 2
2:
        S2HIMEM t1, w1, #16
        sub     r3, w1, b1
        S2LOMEM t1, t1, #16
        bic     r3, r3, w1
        cmp     t1, w2, S2LOMEM #16
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #16
        bne     6f
        ldr     w1, [wp1], #4
        b       2b

5:
#ifdef __ARMEB__
        /* The syndrome value may contain false ones if the string ends
         * with the bytes 0x01 0x00.
         */
        tst     w1, #0xff000000
        it      ne
        tstne   w1, #0x00ff0000
        beq     7f
#else
        lsls    r3, r3, #16
        bne     7f
#endif
        ldrh    w2, [wp2]
        S2LOMEM t1, w1, #16
#ifdef __ARMEB__
        lsl     w2, w2, #16
#endif
        b       8f

6:
        S2HIMEM w2, w2, #16
        S2LOMEM t1, w1, #16
4:
        S2LOMEM w2, w2, #16
        b       8f

        /* Critical inner Loop: Block with 1 byte initial overlap */
        .p2align 2
3:
        and     t1, w1, #LSB
        cmp     t1, w2, S2LOMEM #24
        sub     r3, w1, b1
        bic     r3, r3, w1
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #8
        bne     6f
        ldr     w1, [wp1], #4
        b       3b
4:
        S2LOMEM w2, w2, #24
        b       8f
5:
        /* The syndrome value may contain false ones if the string ends
         * with the bytes 0x01 0x00.
         */
        tst     w1, #LSB
        beq     7f
        ldr     w2, [wp2], #4
6:
        S2LOMEM t1, w1, #8
        bic     w2, w2, #MSB
        b       8f
7:
        mov     r0, #0

        /* Restore registers and stack. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        .pad    #-16
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        bx      lr

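        /* Byte-by-byte tail: t1 holds the remaining bytes of the first
           string, w2 the corresponding bytes of the second, each with the
           next string byte in the #LSB position.  CMP r0, #1 sets the carry
           only when the byte from the second string is non-zero, so CMPCS
           runs only while no terminator has been seen; equal non-zero bytes
           advance both words one byte (S2LOMEMEQ) and loop.  On exit,
           r0 = (s1 byte) - (s2 byte), which is 0 when both strings end
           together. */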
8:
        and     r2, t1, #LSB
        and     r0, w2, #LSB
        cmp     r0, #1
        it      cs
        cmpcs   r0, r2
        itt     eq
        S2LOMEMEQ t1, t1, #8
        S2LOMEMEQ w2, w2, #8
        beq     8b
        sub     r0, r2, r0

        /* Restore registers and stack. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        .pad    #-16
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        bx      lr
        .cfi_endproc
END(strcmp)