/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/*
 * Cache operations.  These function pointers are installed at boot time by
 * the CPU-specific cache init code selected in cpu_cache_init() below, so
 * generic code can use a single set of hooks on every MIPS cache variant.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * The MIPS cacheflush(2) system call.  The cache argument selects which
 * caches to operate on (ICACHE, DCACHE or BCACHE); every request is
 * currently handled by flushing the instruction cache over the range,
 * which covers the common case of making freshly written code visible
 * for execution.  We could optimize the case where the cache argument is
 * not BCACHE but that seems very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

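/*
 * Write back (or defer writing back) the kernel view of a page.  If the
 * page belongs to an address space but is not currently mapped into user
 * space, the flush is deferred: the page is only marked dcache-dirty here
 * and __update_cache() below performs the real flush once a user mapping
 * is set up.  Highmem pages have no permanent kernel mapping to flush, so
 * they are skipped.
 */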
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

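/*
 * Make the kernel's view of an anonymous page coherent with its user
 * mapping at vmaddr.  This only matters when the kernel address and the
 * user address alias in the cache: if the page is mapped into user space
 * and not already marked dcache-dirty, the flush is done through a
 * temporary kernel mapping at the user colour via kmap_coherent();
 * otherwise flushing the regular kernel address is enough.
 */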
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

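/*
 * Called from update_mmu_cache() after a PTE for this page has been
 * installed.  If __flush_dcache_page() left the page marked dcache-dirty,
 * the deferred writeback happens now, but only when it is actually needed:
 * when the new mapping is executable (and the I-cache does not fill from
 * the D-cache) or when the user address aliases the kernel address.
 */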
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

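/*
 * _page_cachable_default holds the default cache coherency attribute bits
 * merged into the PAGE_* protection values on MIPS.  Because of that the
 * PAGE_* values are only known at run time, so protection_map[] (indexed
 * by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags) is filled in
 * by setup_protection_map() below rather than by a static initializer.
 */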
unsigned long _page_cachable_default;
EXPORT_SYMBOL_GPL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

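/*
 * Run the cache setup code for the CPU family we are running on.  Only the
 * *_cache_init() routine for the configured family is built into the
 * kernel; the __weak extern declarations allow the unused routines to be
 * absent at link time, and the cpu_has_* tests ensure an absent routine is
 * never called.  The selected routine installs the flush_* function
 * pointers declared at the top of this file.
 */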
void __devinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	setup_protection_map();
}

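/*
 * Default answer to "must an access to physical address addr through this
 * file bypass the cache?": O_SYNC mappings and addresses beyond the end of
 * RAM are treated as uncached.  Platforms with different requirements can
 * override this weak default.  (This is presumably consulted for /dev/mem
 * style physical memory mappings; see the callers for the exact context.)
 */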
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_SYNC)
		return 1;

	return addr >= __pa(high_memory);
}