/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
        unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
        unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
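
/*
 * For orientation, a hedged sketch (not code from this file) of how a
 * non-coherent platform's dma-default code drives the three hooks above
 * when syncing a buffer for DMA; the cache op is chosen from the
 * transfer direction:
 *
 *	switch (direction) {
 *	case DMA_TO_DEVICE:
 *		_dma_cache_wback(addr, size);		// push dirty lines
 *		break;
 *	case DMA_FROM_DEVICE:
 *		_dma_cache_inv(addr, size);		// discard stale lines
 *		break;
 *	case DMA_BIDIRECTIONAL:
 *		_dma_cache_wback_inv(addr, size);	// both
 *		break;
 *	}
 */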

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems like a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        if (bytes == 0)
                return 0;
        if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
                return -EFAULT;

        flush_icache_range(addr, addr + bytes);

        return 0;
}
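
/*
 * Userspace view, a hedged sketch (not part of this file): this syscall
 * is what MIPS libc's cacheflush() wraps, and a JIT would typically
 * call it after writing instructions and before jumping to them:
 *
 *	#include <sys/cachectl.h>
 *
 *	// buf/len: freshly written code; BCACHE = D- and I-cache
 *	if (cacheflush(buf, len, BCACHE) < 0)
 *		perror("cacheflush");
 */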

void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long addr;

        if (PageHighMem(page))
                return;
        if (mapping && !mapping_mapped(mapping)) {
                SetPageDcacheDirty(page);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too.  But
         * that case is for exec env/arg pages and those are 99% certain
         * to get faulted into the TLB (and thus flushed) anyway.
         */
        addr = (unsigned long) page_address(page);
        flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (page_mapped(page) && !Page_dcache_dirty(page)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        flush_data_cache_page((unsigned long)kaddr);
                        kunmap_coherent();
                } else
                        flush_data_cache_page(addr);
        }
}

EXPORT_SYMBOL(__flush_anon_page);
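
/*
 * The pages_do_alias() test used above boils down to an XOR against a
 * cache-colouring mask; a minimal sketch of the idea, assuming the
 * usual MIPS definition (the real helper and shm_align_mask live
 * elsewhere in arch/mips, the mask being derived from the D-cache way
 * size):
 *
 *	return (addr1 ^ addr2) & shm_align_mask;	// 0 => same colour
 */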

void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long addr;

        if (PageHighMem(page))
                return;

        addr = (unsigned long) page_address(page);
        flush_data_cache_page(addr);
}
EXPORT_SYMBOL_GPL(__flush_icache_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
                addr = (unsigned long) page_address(page);
                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);
                ClearPageDcacheDirty(page);
        }
}
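
/*
 * Note how __update_cache() pairs with __flush_dcache_page() above:
 * pages whose flush was deferred via SetPageDcacheDirty() are caught
 * here when the PTE is (re)installed, and are flushed only if the new
 * user mapping aliases the kernel address, or the page is executable on
 * a CPU whose I-cache does not fill from the D-cache.
 */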

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
        if (cpu_has_rixi) {
                protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

                protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
                protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

        } else {
                protection_map[0] = PAGE_NONE;
                protection_map[1] = PAGE_READONLY;
                protection_map[2] = PAGE_COPY;
                protection_map[3] = PAGE_COPY;
                protection_map[4] = PAGE_READONLY;
                protection_map[5] = PAGE_READONLY;
                protection_map[6] = PAGE_COPY;
                protection_map[7] = PAGE_COPY;
                protection_map[8] = PAGE_NONE;
                protection_map[9] = PAGE_READONLY;
                protection_map[10] = PAGE_SHARED;
                protection_map[11] = PAGE_SHARED;
                protection_map[12] = PAGE_READONLY;
                protection_map[13] = PAGE_READONLY;
                protection_map[14] = PAGE_SHARED;
                protection_map[15] = PAGE_SHARED;
        }
}
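
/*
 * Index key for the tables above (the generic mm convention): bit 0 =
 * VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED, so
 * entries 0-7 cover private (copy-on-write) mappings and 8-15 shared
 * ones.  E.g. a PROT_READ|PROT_WRITE MAP_SHARED mapping selects index
 * 0b1011 = 11, which in the RIXI table gets _PAGE_WRITE but also
 * _PAGE_NO_EXEC because VM_EXEC is clear.
 */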

void cpu_cache_init(void)
{
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

                r3k_cache_init();
        }
        if (cpu_has_6k_cache) {
                extern void __weak r6k_cache_init(void);

                r6k_cache_init();
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);

                r4k_cache_init();
        }
        if (cpu_has_8k_cache) {
                extern void __weak r8k_cache_init(void);

                r8k_cache_init();
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);

                tx39_cache_init();
        }

        if (cpu_has_octeon_cache) {
                extern void __weak octeon_cache_init(void);

                octeon_cache_init();
        }

        setup_protection_map();
}
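
/*
 * A note on the pattern above (hedged; foo is a hypothetical name):
 * declaring the init functions __weak lets this file link even when a
 * given cache flavour is not compiled in, since an undefined weak
 * symbol resolves to zero rather than causing a link error, and the
 * (usually compile-time constant) cpu_has_* test keeps us from ever
 * calling through such a NULL symbol:
 *
 *	extern void __weak foo_cache_init(void);	// may resolve to 0
 *	if (cpu_has_foo_cache)				// guards the call
 *		foo_cache_init();
 */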

int __weak __uncached_access(struct file *file, unsigned long addr)
{
        if (file->f_flags & O_DSYNC)
                return 1;

        return addr >= __pa(high_memory);
}
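
/*
 * On the default policy above (platforms may override this weak
 * symbol, and it is consulted when user mappings of physical memory
 * are set up, e.g. for /dev/mem): O_DSYNC is an explicit request for
 * uncached access, and anything at or beyond __pa(high_memory) lies
 * outside the kernel's cached lowmem window, so it is treated as
 * uncached too.
 */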