/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

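/*
 * Per-CPU cache operations. These start out pointing at the no-op
 * stubs below and are redirected to the real implementations by
 * cpu_cache_init() once the CPU family has been identified at boot.
 */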
void (*flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_dup_mm)(struct mm_struct *mm);
void (*flush_cache_page)(struct vm_area_struct *vma,
                         unsigned long addr, unsigned long pfn);
void (*flush_cache_range)(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end);
void (*flush_dcache_page)(struct page *page);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma,
                          struct page *page);
void (*flush_cache_sigtramp)(unsigned long address);
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

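/*
 * No-op stubs, installed as the default for every operation above so
 * that callers are always safe before (or without) a family-specific
 * cache_init() hooking in the real routines.
 */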
static inline void noop_flush_cache_all(void)
{
}

static inline void noop_flush_cache_mm(struct mm_struct *mm)
{
}

static inline void noop_flush_cache_page(struct vm_area_struct *vma,
                                         unsigned long addr, unsigned long pfn)
{
}

static inline void noop_flush_cache_range(struct vm_area_struct *vma,
                                          unsigned long start, unsigned long end)
{
}

static inline void noop_flush_dcache_page(struct page *page)
{
}

static inline void noop_flush_icache_range(unsigned long start,
                                           unsigned long end)
{
}

static inline void noop_flush_icache_page(struct vm_area_struct *vma,
                                          struct page *page)
{
}

static inline void noop_flush_cache_sigtramp(unsigned long address)
{
}

static inline void noop__flush_region(void *start, int size)
{
}

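/*
 * Write into a page that is also mapped in userspace (typically on
 * behalf of access_process_vm()/ptrace). On parts with an aliasing
 * D-cache the copy goes through a kernel mapping congruent with the
 * user address via kmap_coherent(); otherwise we copy directly and
 * flag the page so the writeback is deferred to __update_cache().
 */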
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

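/*
 * Read from a user-mapped page; the aliasing-safe counterpart of
 * copy_to_user_page() above, reading through kmap_coherent() when the
 * user mapping may hold lines the kernel mapping cannot see.
 */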
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

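/*
 * Copy a user page (the copy-on-write path). The source is read
 * through kmap_coherent() when it may carry aliased lines, and the
 * freshly written destination is written back if the kernel mapping
 * used for the copy aliases the user address it will be mapped at.
 */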
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_wback_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is visible on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

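/*
 * Zero a page destined for userspace, writing back the kernel-side
 * lines if they alias the user mapping the page will appear at.
 */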
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_wback_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

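/*
 * Resolve a deferred flush when a PTE is installed: if the page was
 * flagged PG_dcache_dirty by one of the copy routines above and its
 * kernel mapping aliases the new user address, write the kernel-side
 * lines back now. Nothing to do on parts without D-cache aliases.
 */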
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn) && page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        unsigned long addr = (unsigned long)page_address(page);

                        if (pages_do_alias(addr, address & PAGE_MASK))
                                __flush_wback_region((void *)addr, PAGE_SIZE);
                }
        }
}

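/*
 * Flush an anonymous page whose kernel and user addresses alias.
 * If the page is mapped and not flagged dirty, flush through a
 * coherent mapping congruent with the user address; otherwise the
 * regular kernel address is written back directly.
 */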
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        __flush_wback_region((void *)kaddr, PAGE_SIZE);
                        kunmap_coherent();
                } else
                        __flush_wback_region((void *)addr, PAGE_SIZE);
        }
}

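/*
 * Work out the aliasing geometry of a cache: alias_mask covers the
 * index bits above PAGE_SHIFT, and n_aliases is the number of page
 * colours that can consequently map to the same cache lines (0 when
 * the cache cannot alias at all).
 */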
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

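/*
 * Boot-time cache setup: compute the aliasing parameters for each
 * cache, point every operation at its no-op stub, and then let the
 * family-specific cache_init() (declared __weak so a configured-out
 * family still links) install the routines it implements before the
 * probed parameters are printed.
 */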
void __init cpu_cache_init(void)
{
        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        flush_cache_all = noop_flush_cache_all;
        flush_cache_mm = noop_flush_cache_mm;
        flush_cache_dup_mm = noop_flush_cache_mm;
        flush_cache_page = noop_flush_cache_page;
        flush_cache_range = noop_flush_cache_range;
        flush_dcache_page = noop_flush_dcache_page;
        flush_icache_range = noop_flush_icache_range;
        flush_icache_page = noop_flush_icache_page;
        flush_cache_sigtramp = noop_flush_cache_sigtramp;

        __flush_wback_region = noop__flush_region;
        __flush_purge_region = noop__flush_region;
        __flush_invalidate_region = noop__flush_region;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();
        }

        emit_cache_params();
}