/*
 *  linux/arch/arm/mm/copypage-v4mc.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

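/*
 * There is a single minicache window (the PTE slot at 0xffff8000), so
 * this lock serialises every copy that goes through mc_copy_user_page().
 */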
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.  (A plain-C model of the copy loop follows the
 * function below.)
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
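
/*
 * For reference only, kept out of the build: a plain-C model of the
 * assembly loop above.  Each iteration moves one 64-byte chunk as four
 * ldmia/stmia bursts of four registers, and each 32-byte destination
 * D line is invalidated just before it is written, so "I" (PAGE_SIZE / 64)
 * is the iteration count.  The function name is made up for this sketch.
 */
#if 0
static void mc_copy_user_page_model(unsigned long *from, unsigned long *to)
{
	unsigned int chunk, line;

	for (chunk = 0; chunk < PAGE_SIZE / 64; chunk++) {
		for (line = 0; line < 2; line++) {
			/* "mcr p15, 0, %1, c7, c6, 1": invalidate the
			 * 32-byte D line at 'to' before storing to it */
			memcpy(to, from, 32);	/* two ldmia/stmia pairs */
			from += 8;		/* 8 x 4-byte words = 32 bytes */
			to += 8;
		}
	}
}
#endif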
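/*
 * Flush the source page out of the main D cache if it may be dirty there,
 * map it into the minicache window under minicache_lock, and copy it to
 * the kmapped destination page without disturbing the main D cache.
 */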
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	raw_spin_lock(&minicache_lock);

	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(0xffff8000);

	mc_copy_user_page((void *)0xffff8000, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * ARMv4 optimised clear_user_highpage
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
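	/*
	 * Note the constraint pairing above: "0" (kaddr) feeds the page
	 * address in as the initial value of the "=r" (ptr) output, so the
	 * compiler knows that register is rewritten by the post-incrementing
	 * stmia instructions.
	 */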
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
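
/*
 * How this table is consumed (a sketch from memory, not part of this file):
 * when several user-page implementations are built in, the glue in
 * arch/arm/include/asm/page.h routes the generic helpers through a
 * boot-time selected table, roughly:
 *
 *	extern struct cpu_user_fns cpu_user;
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma)
 *
 * with cpu_user pointed at v4_mc_user_fns during processor setup on
 * SA11x0 and XScale-class CPUs.
 */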