/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

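/*
 * Two reserved kernel virtual windows, one for the source page and
 * one for the destination.  A page is mapped into its window at the
 * cache colour of its user-space address, so each window covers
 * SHMLBA bytes (at most 16K, as checked above).  All users of these
 * windows are serialised by v6_lock, since there is only one set of
 * window PTEs system-wide.
 */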
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with, so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

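	/*
	 * Use two distinct atomic kmap slots (KM_USER0 and KM_USER1) so
	 * that the source and destination pages can be mapped at once.
	 */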
	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(to) != NULL)
#endif
		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with, so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: this requires the MCRR instruction to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
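	/*
	 * ARMv6 "invalidate data cache range" operation: %0 is the start
	 * address and %1 the address of the last cache line in the page.
	 * Stale lines for the page are dropped without being written back.
	 */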
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

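	/*
	 * A lazy D-cache flush may still be pending for the source
	 * page; write it back now so the copy below reads current data.
	 */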
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

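	/*
	 * The TLB may still hold entries for these window addresses
	 * from a previous remap, so flush them before use.
	 */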
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

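/*
 * Default to the non-aliasing implementations; v6_userpage_init()
 * below installs the aliasing variants if the cache turns out to be
 * VIPT aliasing.
 */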
141struct cpu_user_fns v6_user_fns __initdata = {
Russell King303c6442008-10-31 16:32:19 +0000142 .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
Russell King063b0a42008-10-31 15:08:35 +0000143 .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);