/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

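/*
 * Illustrative example (assumes ARM's usual SHMLBA of 4 * PAGE_SIZE
 * from <asm/shmparam.h> and 4K pages): CACHE_COLOUR() then extracts
 * bits 12-13 of the virtual address, i.e. one of four page colours
 * used to avoid aliasing in virtually-indexed caches:
 *
 *	CACHE_COLOUR(0x00000000) == 0
 *	CACHE_COLOUR(0x00001000) == 1
 *	CACHE_COLOUR(0x00003000) == 3
 *	CACHE_COLOUR(0x00004000) == 0
 */
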
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
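
/*
 * For example: a kernel configured for only CONFIG_CPU_ARM926T matches
 * exactly one block above, so _CACHE becomes arm926 and MULTI_CACHE
 * stays undefined, binding the __cpuc_* calls below directly to the
 * arm926_* routines.  Enabling a second cache type (say CONFIG_CPU_XSCALE
 * as well) makes the second matching block define MULTI_CACHE instead,
 * routing every call through the cpu_cache function table.
 */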

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
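
/*
 * Illustrative sketch of how the dma-mapping API pairs the DMA
 * operations above (do not call these directly; this only shows the
 * intended semantics):
 *
 *	dmac_clean_range(buf, buf + len);	// CPU -> device: write back
 *	// ... device reads the buffer via DMA ...
 *
 *	dmac_inv_range(buf, buf + len);		// device -> CPU: discard stale
 *	// ... CPU reads data the device wrote ...
 *
 * Bidirectional mappings use dmac_flush_range(), which combines both.
 */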

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
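
/*
 * For example, in a single-cache kernel with _CACHE defined as arm926,
 * __cpuc_flush_kern_all expands via __glue() (token pasting, see
 * <asm/glue.h>) to arm926_flush_kern_cache_all(), implemented in
 * arch/arm/mm/proc-arm926.S.  With MULTI_CACHE, the same call becomes
 * an indirect branch through the cpu_cache table, which is filled in
 * at boot from the proc_info entry of the detected CPU.
 */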

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
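
/*
 * A minimal sketch, assuming an L2x0-style outer (L2) cache: its init
 * code registers itself at boot roughly as
 *
 *	outer_cache.inv_range   = l2x0_inv_range;
 *	outer_cache.clean_range = l2x0_clean_range;
 *	outer_cache.flush_range = l2x0_flush_range;
 *
 * Until something registers, the outer_*_range() helpers above are
 * cheap no-ops guarded by the NULL checks.
 */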
297
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 * Copy user data from/to a page which is mapped into a different
300 * processes address space. Really, we want to allow our "user
301 * space" model to handle this.
302 */
303#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
304 do { \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 memcpy(dst, src, len); \
George G. Davisa188ad22006-09-02 18:43:20 +0100306 flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307 } while (0)
308
309#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
310 do { \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 memcpy(dst, src, len); \
312 } while (0)
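
/*
 * Illustrative example: a ptrace(PTRACE_POKETEXT, ...) write reaches
 * copy_to_user_page() via access_process_vm(), so the coherency work
 * done by flush_ptrace_access() is what makes the poked instruction
 * visible to the traced task's instruction stream.
 */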

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			unsigned long uaddr, void *kaddr,
			unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

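/*
 * Illustrative example: the module loader calls flush_icache_range()
 * over a module's text after copying it into place, so that Harvard
 * I/D caches present a consistent view before the code first runs.
 */
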
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

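/*
 * A sketch of the deferred path described above: when the page is in
 * the page cache but has no user mappings yet, flush_dcache_page()
 * merely sets PG_dcache_dirty; update_mmu_cache() later does roughly
 *
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__flush_dcache_page(mapping, page);
 *
 * so the clean + invalidate happens only once the kernel-side writes
 * actually need to become visible to user space.
 */
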
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;
	dmac_inv_range(start, start + size);
}

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif