/* memcpy.S: Sparc optimized memcpy and memmove code
 * Hand optimized from GNU libc's memcpy and memmove
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#define FUNC(x) 	\
	.globl	x;	\
	.type	x,@function; \
	.align	4;	\
x:

/* In kernel these functions don't return a value.
 * One should use macros in asm/string.h for that purpose.
 * We return 0, so that bugs are more apparent.
 */
#define SETUP_RETL
#define RETL_INSN	clr	%o0

/* Both these macros have to start with exactly the same insn */
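/* Why: the copy loop enters the aligned variant with "be 82f + 4",
 * and MOVE_BIGCHUNK's first ldd sits in that branch's delay slot.
 * The ldd executes whether or not the branch is taken, so the target
 * skips MOVE_BIGALIGNCHUNK's identical first ldd (hence the "+ 4").
 */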
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c]; \
	st	%t4, [%dst + (offset) + 0x10]; \
	st	%t5, [%dst + (offset) + 0x14]; \
	st	%t6, [%dst + (offset) + 0x18]; \
	st	%t7, [%dst + (offset) + 0x1c];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	std	%t0, [%dst + (offset) + 0x00]; \
	std	%t2, [%dst + (offset) + 0x08]; \
	std	%t4, [%dst + (offset) + 0x10]; \
	std	%t6, [%dst + (offset) + 0x18];

#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	st	%t0, [%dst - (offset) - 0x10]; \
	st	%t1, [%dst - (offset) - 0x0c]; \
	st	%t2, [%dst - (offset) - 0x08]; \
	st	%t3, [%dst - (offset) - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	std	%t0, [%dst - (offset) - 0x10]; \
	std	%t2, [%dst - (offset) - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - (offset) - 0x02], %t0; \
	ldub	[%src - (offset) - 0x01], %t1; \
	stb	%t0, [%dst - (offset) - 0x02]; \
	stb	%t1, [%dst - (offset) - 0x01];

/* Both these macros have to start with exactly the same insn */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	st	%t0, [%dst - (offset) - 0x20]; \
	st	%t1, [%dst - (offset) - 0x1c]; \
	st	%t2, [%dst - (offset) - 0x18]; \
	st	%t3, [%dst - (offset) - 0x14]; \
	st	%t4, [%dst - (offset) - 0x10]; \
	st	%t5, [%dst - (offset) - 0x0c]; \
	st	%t6, [%dst - (offset) - 0x08]; \
	st	%t7, [%dst - (offset) - 0x04];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	std	%t0, [%dst - (offset) - 0x20]; \
	std	%t2, [%dst - (offset) - 0x18]; \
	std	%t4, [%dst - (offset) - 0x10]; \
	std	%t6, [%dst - (offset) - 0x08];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c];

#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src + (offset) + 0x00], %t0; \
	ldub	[%src + (offset) + 0x01], %t1; \
	stb	%t0, [%dst + (offset) + 0x00]; \
	stb	%t1, [%dst + (offset) + 0x01];

#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t5; \
	srl	%t1, shir, %t6; \
	sll	%t0, shil, %t0; \
	or	%t5, %prev, %t5; \
	sll	%t1, shil, %prev; \
	or	%t6, %t0, %t0; \
	srl	%t2, shir, %t1; \
	srl	%t3, shir, %t6; \
	sll	%t2, shil, %t2; \
	or	%t1, %prev, %t1; \
	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
	sll	%t3, shil, %prev; \
	or	%t6, %t2, %t4;

#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t4; \
	srl	%t1, shir, %t5; \
	sll	%t0, shil, %t6; \
	or	%t4, %prev, %t0; \
	sll	%t1, shil, %prev; \
	or	%t5, %t6, %t1; \
	srl	%t2, shir, %t4; \
	srl	%t3, shir, %t5; \
	sll	%t2, shil, %t6; \
	or	%t4, %prev, %t2; \
	sll	%t3, shil, %prev; \
	or	%t5, %t6, %t3; \
	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
	std	%t2, [%dst + (offset) + (offset2) + 0x08];
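
/* The SMOVE macros are the realignment core: each output word is
 * spliced together from two adjacent aligned input words,
 *
 *	out = (w[n] << shil) | (w[n+1] >> shir),  shil + shir == 32,
 *
 * with %prev carrying the spill-over bits between iterations.  They
 * appear to be unused in this file; the unrolled code at 61: below
 * implements the same idea inline.
 */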

	.text
	.align	4

0:
	retl
	 nop		! Only bcopy returns here and it returns void...

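/* memmove picks a safe copy direction: if dst <= src a forward copy
 * is always safe; if dst > src but src + len <= dst the areas do not
 * overlap, so the forward memcpy path is used as well.  Only a true
 * forward overlap takes the byte-at-a-time backward loop at 1: below.
 */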
#ifdef __KERNEL__
FUNC(amemmove)
FUNC(__memmove)
#endif
FUNC(memmove)
	cmp	%o0, %o1
	SETUP_RETL
	bleu	9f
	 sub	%o0, %o1, %o4

	add	%o1, %o2, %o3
	cmp	%o3, %o0
	bleu	0f
	 andcc	%o4, 3, %o5

	add	%o1, %o2, %o1
	add	%o0, %o2, %o0
	sub	%o1, 1, %o1
	sub	%o0, 1, %o0

1:	/* reverse_bytes */

	ldub	[%o1], %o4
	subcc	%o2, 1, %o2
	stb	%o4, [%o0]
	sub	%o1, 1, %o1
	bne	1b
	 sub	%o0, 1, %o0

	retl
	 RETL_INSN

/* NOTE: This code is executed just for the cases
 *	 where %src (= %o1) & 3 != 0.
 *	 We need to align it to 4.  So, for (%src & 3):
 *	   1: we need to do ldub, lduh
 *	   2: lduh
 *	   3: just ldub
 *	 so even if it looks weird, the branches
 *	 are correct here. -jj
 */
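/* Roughly, in C (dst - src is a multiple of 4 on this path, so dst
 * reaches 2-byte alignment together with src):
 *
 *	if (src & 1) { *dst++ = *src++; len -= 1; }
 *	if (src & 2) { *(u16 *)dst = *(u16 *)src; dst += 2; src += 2; len -= 2; }
 */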
78:	/* dword_align */

	andcc	%o1, 1, %g0
	be	4f
	 andcc	%o1, 2, %g0

	ldub	[%o1], %g2
	add	%o1, 1, %o1
	stb	%g2, [%o0]
	sub	%o2, 1, %o2
	bne	3f
	 add	%o0, 1, %o0
4:
	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	sub	%o2, 2, %o2
	b	3f
	 add	%o0, 2, %o0

FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */

	sub	%o0, %o1, %o4
	SETUP_RETL
9:
	andcc	%o4, 3, %o5
0:
	bne	86f
	 cmp	%o2, 15

	bleu	90f
	 andcc	%o1, 3, %g0

	bne	78b
3:
	 andcc	%o1, 4, %g0

	be	2f
	 mov	%o2, %g1

	ld	[%o1], %o4
	sub	%g1, 4, %g1
	st	%o4, [%o0]
	add	%o1, 4, %o1
	add	%o0, 4, %o0
2:
	andcc	%g1, 0xffffff80, %g7
	be	3f
	 andcc	%o0, 4, %g0

	be	82f + 4
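	! Delay slot below is the first ldd of MOVE_BIGCHUNK; the "+ 4"
	! above skips the identical first ldd of the aligned loop at 82:.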
5:
	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g7, 128, %g7
	add	%o1, 128, %o1
	bne	5b
	 add	%o0, 128, %o0
3:
	andcc	%g1, 0x70, %g7
	be	80f
	 andcc	%g1, 8, %g0

	sethi	%hi(80f), %o5
	srl	%g7, 1, %o4
	add	%g7, %o4, %o4
	add	%o1, %g7, %o1
	sub	%o5, %o4, %o5
	jmpl	%o5 + %lo(80f), %g0
	 add	%o0, %g7, %o0
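/* Computed jump into the 79: table below.  Each MOVE_LASTCHUNK is
 * 6 insns (24 bytes of code) per 16 bytes of data, so the entry point
 * lies 1.5 code bytes per remaining data byte before 80f:
 * %o4 = %g7 + (%g7 >> 1).
 */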

79:	/* memcpy_table */

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:	/* memcpy_table_end */
	be	81f
	 andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	st	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
	st	%g3, [%o0 - 0x04]

81:	/* memcpy_last7 */

	be	1f
	 andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:
	be	1f
	 nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	 RETL_INSN

82:	/* ldd_std */
	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g7, 128, %g7
	add	%o1, 128, %o1
	bne	82b
	 add	%o0, 128, %o0

	andcc	%g1, 0x70, %g7
	be	84f
	 andcc	%g1, 8, %g0

	sethi	%hi(84f), %o5
	add	%o1, %g7, %o1
	sub	%o5, %g7, %o5
	jmpl	%o5 + %lo(84f), %g0
	 add	%o0, %g7, %o0
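/* Same computed-jump trick for the aligned 83: table, but each
 * MOVE_LASTALIGNCHUNK is 4 insns (16 bytes of code) per 16 bytes of
 * data, so the backward offset from 84f is simply %g7 itself.
 */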

83:	/* amemcpy_table */

	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:	/* amemcpy_table_end */
	be	85f
	 andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	std	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
85:	/* amemcpy_last7 */
	be	1f
	 andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:
	be	1f
	 nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	 RETL_INSN

86:	/* non_aligned */
	cmp	%o2, 6
	bleu	88f
	 andcc	%o0, 3, %g0

	be	61f
	 andcc	%o0, 1, %g0
	be	60f
	 andcc	%o0, 2, %g0

	ldub	[%o1], %g5
	add	%o1, 1, %o1
	stb	%g5, [%o0]
	sub	%o2, 1, %o2
	bne	61f
	 add	%o0, 1, %o0
60:
	ldub	[%o1], %g3
	add	%o1, 2, %o1
	stb	%g3, [%o0]
	sub	%o2, 2, %o2
	ldub	[%o1 - 1], %g3
	add	%o0, 2, %o0
	stb	%g3, [%o0 - 1]
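/* Shifted copy for src/dst that are misaligned relative to each
 * other: read aligned words and splice neighbours together,
 *
 *	out = (w0 << %g4) | (w1 >> %g7),
 *
 * where %g4 = (src & 3) * 8 and %g7 = 32 - %g4.
 */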
61:
	and	%o1, 3, %g2
	and	%o2, 0xc, %g3
	and	%o1, -4, %o1
	cmp	%g3, 4
	sll	%g2, 3, %g4
	mov	32, %g2
	be	4f
	 sub	%g2, %g4, %g7

	blu	3f
	 cmp	%g3, 0x8

	be	2f
	 srl	%o2, 2, %g3

	ld	[%o1], %o3
	add	%o0, -8, %o0
	ld	[%o1 + 4], %o4
	b	8f
	 add	%g3, 1, %g3
2:
	ld	[%o1], %o4
	add	%o0, -12, %o0
	ld	[%o1 + 4], %o5
	add	%g3, 2, %g3
	b	9f
	 add	%o1, -4, %o1
3:
	ld	[%o1], %g1
	add	%o0, -4, %o0
	ld	[%o1 + 4], %o3
	srl	%o2, 2, %g3
	b	7f
	 add	%o1, 4, %o1
4:
	ld	[%o1], %o5
	cmp	%o2, 7
	ld	[%o1 + 4], %g1
	srl	%o2, 2, %g3
	bleu	10f
	 add	%o1, 8, %o1

	ld	[%o1], %o3
	add	%g3, -1, %g3
5:
	sll	%o5, %g4, %g2
	srl	%g1, %g7, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0]
7:
	ld	[%o1 + 4], %o4
	sll	%g1, %g4, %g2
	srl	%o3, %g7, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0 + 4]
8:
	ld	[%o1 + 8], %o5
	sll	%o3, %g4, %g2
	srl	%o4, %g7, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0 + 8]
9:
	ld	[%o1 + 12], %g1
	sll	%o4, %g4, %g2
	srl	%o5, %g7, %g5
	addcc	%g3, -4, %g3
	or	%g2, %g5, %g2
	add	%o1, 16, %o1
	st	%g2, [%o0 + 12]
	add	%o0, 16, %o0
	bne,a	5b
	 ld	[%o1], %o3
10:
	sll	%o5, %g4, %g2
	srl	%g1, %g7, %g5
	srl	%g7, 3, %g3
	or	%g2, %g5, %g2
	sub	%o1, %g3, %o1
	andcc	%o2, 2, %g0
	st	%g2, [%o0]
	be	1f
	 andcc	%o2, 1, %g0

	ldub	[%o1], %g2
	add	%o1, 2, %o1
	stb	%g2, [%o0 + 4]
	add	%o0, 2, %o0
	ldub	[%o1 - 1], %g2
	stb	%g2, [%o0 + 3]
1:
	be	1f
	 nop
	ldub	[%o1], %g2
	stb	%g2, [%o0 + 4]
1:
	retl
	 RETL_INSN

88:	/* short_end */

	and	%o2, 0xe, %o3
20:
	sethi	%hi(89f), %o5
	sll	%o3, 3, %o4
	add	%o0, %o3, %o0
	sub	%o5, %o4, %o5
	add	%o1, %o3, %o1
	jmpl	%o5 + %lo(89f), %g0
	 andcc	%o2, 1, %g0
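/* Each MOVE_SHORTCHUNK is 4 insns (16 bytes of code) per 2 bytes of
 * data, hence the offset of %o3 << 3 code bytes back from 89f.
 */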

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89:	/* short_table_end */

	be	1f
	 nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	 RETL_INSN

90:	/* short_aligned_end */
	bne	88b
	 andcc	%o2, 8, %g0

	be	1f
	 andcc	%o2, 4, %g0

	ld	[%o1 + 0x00], %g2
	ld	[%o1 + 0x04], %g3
	add	%o1, 8, %o1
	st	%g2, [%o0 + 0x00]
	st	%g3, [%o0 + 0x04]
	add	%o0, 8, %o0
1:
	b	81b
	 mov	%o2, %g1