/*
 * arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
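
/*
 * Example (illustrative): with a single cache type configured, say
 * CONFIG_CPU_ARM926T, _CACHE expands to arm926 and the __glue() macros
 * further down paste it into the low-level entry points, e.g.
 * __cpuc_flush_kern_all becomes arm926_flush_kern_cache_all.  When
 * several cache types are built in, MULTI_CACHE is defined instead and
 * all calls are routed through the cpu_cache function pointer table
 * declared below.
 */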

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

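/*
 * Sketch only (the authoritative code lives in arch/arm/mm/): the
 * deferred clean is typically resolved when the page is about to be
 * mapped into user space, roughly:
 *
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__flush_dcache_page(page_mapping(page), page);
 *
 * See flush_dcache_page() and update_mmu_cache() for the real thing.
 */
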
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
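
/*
 * Worked example of the rounding convention above (illustrative only):
 * to flush an arbitrary user address range [start, end), the caller
 * rounds the start down and the end up to page boundaries:
 *
 *	__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 *				vma->vm_flags);
 *
 * which is exactly what flush_cache_range() does later in this file.
 */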

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
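
/*
 * Conceptual sketch only: these are what the dma-mapping layer uses
 * internally on non-coherent CPUs.  Before a device reads a buffer the
 * CPU has written (DMA_TO_DEVICE) the lines are cleaned; before the CPU
 * reads a buffer the device has written (DMA_FROM_DEVICE) they are
 * invalidated, roughly:
 *
 *	dmac_clean_range(buf, buf + size);	(CPU wrote, device will read)
 *	dmac_inv_range(buf, buf + size);	(device wrote, CPU will read)
 *
 * Drivers must not call these directly; dma_map_single() and friends
 * perform the right operation, including the outer cache (see below).
 */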

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
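
/*
 * Illustrative note: the outer (e.g. L2) cache operations take physical
 * addresses, unlike the dmac_* helpers above which take virtual ones.
 * For a CPU-to-device transfer the dma-mapping code conceptually does
 *
 *	dmac_clean_range(vaddr, vaddr + size);
 *	outer_clean_range(__pa(vaddr), __pa(vaddr) + size);
 *
 * so the data is pushed out through both cache levels before the device
 * reads it.  As with dmac_*, this is dma-mapping internals, not a
 * driver-facing interface.
 */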

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
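
/*
 * Illustrative usage (roughly the access_process_vm() path used by
 * ptrace when poking another task's memory):
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 *
 * The flush_ptrace_access() call in the macro keeps the target task's
 * I-cache and D-cache coherent for the bytes just written.
 */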

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
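
/*
 * Userspace sketch (illustrative, not an interface definition): a JIT
 * that has just emitted instructions typically issues the ARM-private
 * cacheflush system call before jumping to them, e.g.
 *
 *	syscall(__ARM_NR_cacheflush, code, code + code_size, 0);
 *
 * which reaches this macro via sys_cacheflush().
 */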

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
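
/*
 * Typical kernel-side use (illustrative): after instructions have been
 * written to memory, e.g. when module code is loaded or a breakpoint is
 * patched in, something like
 *
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 *
 * makes the new instructions visible to subsequent instruction fetches.
 */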

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
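
/*
 * Illustrative example: freshly written page tables are cleaned so that
 * the hardware page table walker, which reads from memory, sees them;
 * the pgd allocator does roughly
 *
 *	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 *
 * after initialising a new page directory.
 */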

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);
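
/*
 * Illustrative caller pattern (see Documentation/cachetlb.txt): code
 * that writes to a page cache page through the kernel mapping follows
 * up with flush_dcache_page(), e.g.
 *
 *	kaddr = kmap(page);
 *	memcpy(kaddr + offset, data, len);
 *	flush_dcache_page(page);
 *	kunmap(page);
 */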

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;
	dmac_inv_range(start, start + size);
}

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}
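
/*
 * Concrete illustration of the aliasing hazard (hypothetical addresses):
 * a page last written through its lowmem mapping at, say, 0xc0100000 may
 * still hold dirty lines indexed by that address; if the page is now
 * also mapped at a vmalloc address such as 0xe0800000 with a different
 * cache colour, a later writeback of the stale lowmem lines could
 * overwrite data stored through the new mapping.  The flush_cache_all()
 * above removes that possibility on aliasing caches.
 */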

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif