/*
 *  linux/arch/arm/lib/copypage.S
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ASM optimised string functions
 */
| 12 | #include <linux/linkage.h> |
| 13 | #include <linux/init.h> |
Sam Ravnborg | e6ae744 | 2005-09-09 21:08:59 +0200 | [diff] [blame] | 14 | #include <asm/asm-offsets.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | |
	.text
	.align	5
/*
 * ARMv4 optimised copy_user_page
 *
 * Copies one page (PAGE_SZ bytes) from a source to a destination
 * kernel mapping, 64 bytes per loop iteration.
 *   r0 = destination kernel address (written via stmia r0!)
 *   r1 = source kernel address (read via ldmia r1!)
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_page that does the right thing.
 */
ENTRY(v4wb_copy_user_page)
	stmfd	sp!, {r4, lr}			@ 2  save r4/lr; both reused as data scratch below
	mov	r2, #PAGE_SZ/64			@ 1  loop counter: one iteration per 64 bytes
	ldmia	r1!, {r3, r4, ip, lr}		@ 4  preload first 16 source bytes
1:	mcr	p15, 0, r0, c7, c6, 1		@ 1  invalidate D line at dest before storing
	stmia	r0!, {r3, r4, ip, lr}		@ 4  store 16 bytes
	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
	stmia	r0!, {r3, r4, ip, lr}		@ 4
	ldmia	r1!, {r3, r4, ip, lr}		@ 4
	mcr	p15, 0, r0, c7, c6, 1		@ 1  invalidate D line for next 32 dest bytes
	stmia	r0!, {r3, r4, ip, lr}		@ 4
	ldmia	r1!, {r3, r4, ip, lr}		@ 4
	subs	r2, r2, #1			@ 1  one 64-byte block done
	stmia	r0!, {r3, r4, ip, lr}		@ 4
	ldmneia	r1!, {r3, r4, ip, lr}		@ 4  preload next block unless this was the last
	bne	1b				@ 1
	mcr	p15, 0, r1, c7, c10, 4		@ 1  drain write buffer
						@ NOTE(review): Rd for drain-WB is architecturally
						@ SBZ, but r1 is a non-zero end-of-page address
						@ here — confirm this is benign on targeted cores.
	ldmfd	sp!, {r4, pc}			@ 3  restore r4, return via saved lr
| 48 | |
	.align	5
/*
 * ARMv4 optimised clear_user_page
 *
 * Zeroes one page (PAGE_SZ bytes) at the kernel address in r0,
 * 64 bytes per loop iteration.  Same story as above: each destination
 * D line is invalidated just before it is stored to, so the
 * read-allocate Dcache never holds a stale alias.
 */
ENTRY(v4wb_clear_user_page)
	str	lr, [sp, #-4]!			@ save return address
	mov	r1, #PAGE_SZ/64			@ 1  loop counter: one iteration per 64 bytes
	mov	r2, #0				@ 1  \
	mov	r3, #0				@ 1   \ four registers of zeros
	mov	ip, #0				@ 1   / for the block stores
	mov	lr, #0				@ 1  /
1:	mcr	p15, 0, r0, c7, c6, 1		@ 1  invalidate D line at dest before storing
	stmia	r0!, {r2, r3, ip, lr}		@ 4  zero 16 bytes
	stmia	r0!, {r2, r3, ip, lr}		@ 4
	mcr	p15, 0, r0, c7, c6, 1		@ 1  invalidate D line for next 32 dest bytes
	stmia	r0!, {r2, r3, ip, lr}		@ 4
	stmia	r0!, {r2, r3, ip, lr}		@ 4
	subs	r1, r1, #1			@ 1  one 64-byte block done
	bne	1b				@ 1
	mcr	p15, 0, r1, c7, c10, 4		@ 1  drain WB (r1 == 0 here, so SBZ is satisfied)
	ldr	pc, [sp], #4			@ return: pop saved lr straight into pc
| 72 | |
__INITDATA

/*
 * Function table exporting this variant's page operations.
 * NOTE(review): entry order (clear first, then copy) must match the
 * corresponding C structure's field layout — confirm against the
 * processor user-functions struct declared in the arch headers.
 */
	.type	v4wb_user_fns, #object
ENTRY(v4wb_user_fns)
	.long	v4wb_clear_user_page		@ clear_user_page routine
	.long	v4wb_copy_user_page		@ copy_user_page routine
	.size	v4wb_user_fns, . - v4wb_user_fns