/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
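
/*
 * Worked example (assuming SHMLBA == 4 * PAGE_SIZE and PAGE_SHIFT == 12,
 * as on a typical aliasing VIPT ARM cache): CACHE_COLOUR() extracts the
 * virtual address bits that select a cache alias, so vaddrs 0x1000 and
 * 0x5000 both have colour 1, while 0x2000 has colour 2.  Two virtual
 * mappings of the same physical page hit the same cache lines only when
 * their colours match.
 */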

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) || \
    defined(CONFIG_CPU_ARM740T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM7TDMI) || \
    defined(CONFIG_CPU_ARM9TDMI)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these functions, and
 *	their required effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
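
/*
 * Illustrative sketch (not part of this header) of how a DMA mapping
 * layer might use the dmac_* primitives declared below: invalidate when
 * the device writes and the CPU will read, clean when the CPU wrote and
 * the device will read, and flush for bidirectional buffers.  The helper
 * name and its 'dir' parameter are hypothetical; the real code lives in
 * the arch dma-mapping implementation.
 *
 *	static void example_sync_for_device(void *vaddr, size_t size,
 *					    enum dma_data_direction dir)
 *	{
 *		unsigned long start = (unsigned long)vaddr;
 *
 *		if (dir == DMA_FROM_DEVICE)
 *			dmac_inv_range(start, start + size);
 *		else if (dir == DMA_TO_DEVICE)
 *			dmac_clean_range(start, start + size);
 *		else
 *			dmac_flush_range(start, start + size);
 *	}
 */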

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};
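
/*
 * How the MULTI_CACHE case below is expected to be wired up (a sketch;
 * the actual code lives outside this header, in the processor setup
 * path): at boot the CPU ID is matched against a processor list entry,
 * and that entry's cache function table is copied into the global
 * 'cpu_cache', so every __cpuc_* call becomes an indirect call through
 * this structure.
 */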

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif
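
/*
 * Example of the single-cache expansion above: with _CACHE defined as
 * v4wb (e.g. a StrongARM SA110-only build), __glue() pastes the two
 * tokens together, so a call such as
 *
 *	__cpuc_flush_kern_all();
 *
 * compiles directly into a call to v4wb_flush_kern_cache_all() from
 * arch/arm/mm/cache-v4wb.S, with no function-pointer indirection.
 */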

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
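
/*
 * Illustrative caller, for orientation (the code is in generic mm, not
 * here): ptrace's access_process_vm() kmaps the target page and writes
 * a breakpoint through the kernel alias with copy_to_user_page(); the
 * flush_ptrace_access() step above is what then makes the new
 * instruction visible to the traced task's instruction fetches.
 */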

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		    unsigned long uaddr, void *kaddr,
		    unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
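
/*
 * Hypothetical user-space sketch of why this exists: a JIT that has just
 * written instructions into a buffer must synchronise the Harvard caches
 * before jumping to the new code.  On ARM Linux that is the private
 * cacheflush syscall, which lands here:
 *
 *	emit_code(buf, len);				(hypothetical JIT step)
 *	syscall(__ARM_NR_cacheflush, buf, buf + len, 0);
 *	((void (*)(void))buf)();
 */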

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
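
/*
 * Typical in-kernel use (a sketch; the module loader is the classic
 * caller): after copying instructions into kernel memory, make them
 * visible to the instruction fetch path before executing them.
 *
 *	memcpy(dest, insns, size);
 *	flush_icache_range((unsigned long)dest, (unsigned long)dest + size);
 */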

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);
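
/*
 * Sketch of the deferred path described above, assuming an aliasing
 * cache: a driver that fills a page cache page through the kernel
 * mapping calls flush_dcache_page().  If the page has no user mappings
 * yet, the implementation may just set PG_dcache_dirty (defined above)
 * and leave the clean + invalidate to update_mmu_cache() when the page
 * is later mapped into user space.
 *
 *	memcpy(page_address(page), data, len);
 *	flush_dcache_page(page);
 */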

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)		(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
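
/*
 * Decoding example for the macros above (based on the ARM cache type
 * register layout; treat the bit positions as this kernel's reading of
 * it): bits [28:25] hold the cache type field, where the value 14 (0xe)
 * identifies a VIPT cache, and bit 23 flags aliasing page-colouring
 * restrictions.  __cacheid_present() catches older CPUs without a cache
 * type register, where the CP15 read simply returns the main ID register
 * value again.
 */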

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_aliasing(__val);			\
	})

#endif

#endif