/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

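/*
 * Per-CPU cache operations. Each CPU family's cache_init() hook repoints
 * these at its own implementation; until then they default to cache_noop
 * so they are always safe to call.
 */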
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

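/*
 * Low-level range flush primitives (writeback, purge, invalidate),
 * likewise filled in by the CPU-specific cache_init() and defaulting to
 * no-ops via noop__flush_region() below.
 */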
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

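/*
 * Run a cache operation on every online CPU: broadcast it via IPI where
 * possible, then run it locally. Preemption stays disabled so we cannot
 * migrate between the broadcast and the local call.
 */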
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}

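/*
 * Write into a user page (e.g. on behalf of ptrace/access_process_vm).
 * On CPUs with aliasing D-caches, write through a kernel mapping that is
 * congruent to the user's virtual address when the page is mapped and
 * tracked clean; otherwise use the kernel address and clear the clean bit
 * so the alias is resolved later. An executable VMA additionally needs
 * the I-cache brought in sync via flush_cache_page().
 */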
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

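/*
 * Read from a user page: the mirror of copy_to_user_page(), again going
 * through a congruent kernel mapping when the page is mapped and clean.
 */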
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

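/*
 * Copy a user highmem page (e.g. the copy-on-write path). When the source
 * may hold dirty lines under an aliasing mapping, read it through a
 * congruent kernel mapping; the destination is then purged if it aliases
 * the user address (or the VMA is executable) so no stale lines survive
 * the copy.
 */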
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

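/*
 * Zero a user highmem page, purging the kernel-side lines afterwards if
 * they would alias the user mapping.
 */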
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

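/*
 * Called from the PTE update path (update_mmu_cache()): on aliasing
 * D-caches, if the page was not yet tracked clean, mark it clean and
 * purge its kernel-mapping lines so the new user mapping cannot see
 * stale data through a differently-coloured alias.
 */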
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

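/*
 * Flush an anonymous page whose kernel address aliases the user mapping.
 * If the page is mapped and tracked clean, a kmap_coherent()/
 * kunmap_coherent() round trip suffices, since kunmap_coherent()
 * currently purges the mapping; otherwise purge the kernel address
 * directly.
 */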
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

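/*
 * The SMP-aware cacheflush API: each wrapper below fans the corresponding
 * local_flush_*() operation out to all online CPUs through
 * cacheop_on_each_cpu(). The mm-wide flushes short-circuit when the
 * D-cache has no aliases, as there is then nothing to make coherent.
 */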
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

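/*
 * Boot-time cache setup: compute the alias geometry, install the no-op
 * flush primitives, and then let the matching CPU family's cache_init()
 * override them. The family hooks are declared __weak so this file still
 * links when a given family's cache code is not built in; if the cache is
 * disabled in CCR we skip straight to parameter reporting with the no-ops
 * left in place.
 */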
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}