/*
 * Copyright (c) 2008 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <asm.h>
#include <arch/arm/cores.h>

.text
.align 2

/* void *mymemset(void *s, int c, size_t n); -- memset semantics */
.global mymemset
mymemset:
    // check for zero length
    cmp     r2, #0
    bxeq    lr

    // save the original pointer
    mov     r12, r0

    // short memsets aren't worth optimizing
    cmp     r2, #(32 + 16)
    blt     .L_bytewise
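    // (32 + 16 appears chosen so that, even after spending up to 15
    // bytes on alignment below, at least one full 32-byte loop pass
    // remains; anything smaller goes straight to the byte loop.)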

    // fill a 32 bit register with the 8 bit value
    and     r1, r1, #0xff
    orr     r1, r1, r1, lsl #8
    orr     r1, r1, r1, lsl #16
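    // e.g. c == 0x2c: r1 ends up as 0x2c2c2c2c, so each word store
    // below writes four copies of the fill byte at once.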

    // check for 16 byte alignment
    tst     r0, #15
    bne     .L_not16bytealigned

.L_bigset:
    // dump some registers to make space for our values
    stmfd   sp!, { r4-r5 }

    // fill a bunch of registers with the set value
    mov     r3, r1
    mov     r4, r1
    mov     r5, r1

    // prepare the count register so we can avoid an extra compare
    sub     r2, r2, #32

    // 32 bytes at a time
.L_bigset_loop:
    stmia   r0!, { r1, r3, r4, r5 }
    subs    r2, r2, #32
    stmia   r0!, { r1, r3, r4, r5 }
    bge     .L_bigset_loop
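    // each pass stores 32 bytes as two 16-byte stmias, with the subs
    // tucked between them so the flags are ready for the branch without
    // a separate cmp (likely also a scheduling win on older ARM cores).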

    // restore our dumped registers
    ldmfd   sp!, { r4-r5 }

    // see if we're done
    adds    r2, r2, #32
    beq     .L_done
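    // the loop was biased by 32 up front, so adding it back leaves r2
    // holding the 0-31 byte remainder; e.g. n == 48 does one 32-byte
    // pass and falls through with r2 == 16 for the bytewise tail.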

.L_bytewise:
    // bytewise memset
    subs    r2, r2, #1
    strb    r1, [r0], #1
    bgt     .L_bytewise

.L_done:
    // restore the base pointer as return value
    mov     r0, r12
    bx      lr

.L_not16bytealigned:
    // dst is not 16 byte aligned, so store up to 15 bytes to get it aligned.

    // set the condition flags based on the alignment.
    lsl     r3, r0, #28
    rsb     r3, r3, #0
    msr     CPSR_f, r3      // move into NZCV fields in CPSR
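    // r3 now holds (16 - (dst & 15)) << 28, so the flags encode the
    // byte count needed to align dst: N = 8 bytes, Z = 4, C = 2, V = 1.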

    // move as many bytes as necessary to get the dst aligned
    strvsb  r1, [r0], #1    // V set: 1 byte
    strcsh  r1, [r0], #2    // C set: 2 bytes
    streq   r1, [r0], #4    // Z set: 4 bytes
    strmi   r1, [r0], #4    // N set: 8 bytes...
    strmi   r1, [r0], #4    // N set: ...as two word stores
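    // worked example: dst & 15 == 5 needs 16 - 5 == 11 == 0b1011 bytes,
    // so N, C and V are set and 8 + 2 + 1 == 11 bytes are stored,
    // leaving r0 16-byte aligned for .L_bigset.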

    // fix the remaining len; r3 still holds (16 - alignment) in its top nibble
    sub     r2, r2, r3, lsr #28

    // do the large memset
    b       .L_bigset
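
/* Usage sketch from C (hypothetical caller; assumes this file is
 * assembled and linked into the same image):
 *
 *   extern void *mymemset(void *s, int c, size_t n);
 *
 *   char buf[64];
 *   mymemset(buf, 0xab, sizeof(buf));   // fills buf with 0xab
 */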