Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/arm/mm/copypage-v6.c |
| 3 | * |
| 4 | * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | */ |
| 10 | #include <linux/init.h> |
| 11 | #include <linux/spinlock.h> |
| 12 | #include <linux/mm.h> |
Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 13 | #include <linux/highmem.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | #include <asm/pgtable.h> |
| 16 | #include <asm/shmparam.h> |
| 17 | #include <asm/tlbflush.h> |
| 18 | #include <asm/cacheflush.h> |
Russell King | 46097c7 | 2008-08-10 18:10:19 +0100 | [diff] [blame] | 19 | #include <asm/cachetype.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | |
Russell King | 1b2e2b7 | 2006-08-21 17:06:38 +0100 | [diff] [blame] | 21 | #include "mm.h" |
| 22 | |
/*
 * The two scratch windows below are 16KB (0x4000) apart; if SHMLBA ever
 * exceeded that, a single window could not cover every cache colour.
 */
#if SHMLBA > 16384
#error FIX ME
#endif

/*
 * Fixed kernel virtual addresses used as temporary, colour-indexed
 * mappings of the source and destination pages (installed via
 * TOP_PTE() + CACHE_COLOUR(vaddr) in the aliasing paths below).
 */
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

/* Serialises all use of the from_address/to_address scratch mappings. */
static DEFINE_SPINLOCK(v6_lock);
| 31 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | /* |
| 33 | * Copy the user page. No aliasing to deal with so we can just |
| 34 | * attack the kernel's existing mapping of these pages. |
| 35 | */ |
Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 36 | static void v6_copy_user_highpage_nonaliasing(struct page *to, |
| 37 | struct page *from, unsigned long vaddr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 38 | { |
Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 39 | void *kto, *kfrom; |
| 40 | |
| 41 | kfrom = kmap_atomic(from, KM_USER0); |
| 42 | kto = kmap_atomic(to, KM_USER1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 43 | copy_page(kto, kfrom); |
Catalin Marinas | 115b224 | 2009-11-24 18:54:07 +0100 | [diff] [blame] | 44 | #ifdef CONFIG_HIGHMEM |
| 45 | /* |
| 46 | * kmap_atomic() doesn't set the page virtual address, and |
| 47 | * kunmap_atomic() takes care of cache flushing already. |
| 48 | */ |
| 49 | if (page_address(to) != NULL) |
| 50 | #endif |
Anand Gadiyar | 2395d66 | 2009-12-18 12:56:10 +0100 | [diff] [blame] | 51 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); |
Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 52 | kunmap_atomic(kto, KM_USER1); |
| 53 | kunmap_atomic(kfrom, KM_USER0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 54 | } |
| 55 | |
| 56 | /* |
| 57 | * Clear the user page. No aliasing to deal with so we can just |
| 58 | * attack the kernel's existing mapping of this page. |
| 59 | */ |
Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 60 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 61 | { |
Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 62 | void *kaddr = kmap_atomic(page, KM_USER0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 63 | clear_page(kaddr); |
Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 64 | kunmap_atomic(kaddr, KM_USER0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 65 | } |
| 66 | |
/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
	/*
	 * MCRR p15, 0, <end>, <start>, c6 operates on the D-cache range
	 * [kto, kto + PAGE_SIZE), discarding any stale lines for the
	 * destination page so they never get written back over the copy
	 * done through the colour-matched alias.  The end address is the
	 * start of the last cache line inside the page (range is
	 * inclusive of its endpoints — hence "- L1_CACHE_BYTES").
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}
| 79 | |
/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	/* Cache colour of the user address; selects which alias slot to use. */
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	/* Write back any dirty kernel-side data in the source page first. */
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* Drop stale cache lines for the destination's kernel mapping. */
	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.  The lock protects the shared
	 * from_address/to_address scratch PTEs.
	 */
	spin_lock(&v6_lock);

	/* Map both pages at colour-matched scratch addresses. */
	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

	/* Kill any stale TLB entries left by a previous user of the slots. */
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}
| 114 | |
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	/* Cache colour of the user address; selects which alias slot to use. */
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* Drop stale cache lines for the page's kernel mapping. */
	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.  The lock protects the
	 * shared to_address scratch PTE.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	/* Kill any stale TLB entry left by a previous user of the slot. */
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}
| 140 | |
/*
 * Default to the non-aliasing implementations; v6_userpage_init()
 * swaps in the colour-aware variants at boot when the cache is a
 * VIPT aliasing one.
 */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};
| 145 | |
| 146 | static int __init v6_userpage_init(void) |
| 147 | { |
| 148 | if (cache_is_vipt_aliasing()) { |
Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 149 | cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; |
Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 150 | cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 151 | } |
| 152 | |
| 153 | return 0; |
| 154 | } |
| 155 | |
Russell King | 08ee4e4 | 2005-05-10 17:30:47 +0100 | [diff] [blame] | 156 | core_initcall(v6_userpage_init); |