/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#include <asm/rodata.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
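
/*
 * A worked example (assuming 4K pages, where ARM's SHMLBA is 16K,
 * i.e. four page colours): CACHE_COLOUR(0x1000) == 1,
 * CACHE_COLOUR(0x3000) == 3, and CACHE_COLOUR(0x4000) wraps back to 0.
 */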

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean		PG_arch_1
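
/*
 * A sketch of how this flag gates the deferred flush on the arch side
 * (cf. __sync_icache_dcache() in arch/arm/mm/flush.c); illustrative
 * only, not a verbatim quote of that code:
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		__flush_dcache_page(mapping, page);
 */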

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 * DMA Cache Coherency
 * ===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
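
/*
 * Illustrative only: the per-CPU tables are actually built in assembly
 * (see arch/arm/mm/cache-v7.S and friends) and referenced from the
 * processor's proc_info entry.  Expressed as a C initializer, a v7
 * table would look roughly like this sketch:
 *
 *	struct cpu_cache_fns v7_cache_fns = {
 *		.flush_icache_all	= v7_flush_icache_all,
 *		.flush_kern_all		= v7_flush_kern_cache_all,
 *		.flush_user_all		= v7_flush_user_cache_all,
 *		.flush_user_range	= v7_flush_user_cache_range,
 *		.coherent_kern_range	= v7_coherent_kern_range,
 *		.coherent_user_range	= v7_coherent_user_range,
 *		.flush_kern_dcache_area	= v7_flush_kern_dcache_area,
 *		.dma_map_area		= v7_dma_map_area,
 *		.dma_unmap_area		= v7_dma_unmap_area,
 *		.dma_flush_range	= v7_dma_flush_range,
 *	};
 */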

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
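
/*
 * These are reached via access_process_vm() (e.g. a ptrace write into
 * another task's text).  A simplified sketch of the calling pattern in
 * mm/memory.c, with variable names abbreviated:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */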

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache (ICIALLU) */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable (ICIALLUIS) */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases.  Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
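
/*
 * Typical use (a sketch, not taken verbatim from any caller): after the
 * kernel writes instructions, e.g. when loading a module or patching
 * code, the range must be made coherent before it is executed:
 *
 *	memcpy(addr, insns, size);
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + size);
 */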

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e. page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
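
/*
 * Sketch of the usual pattern (per Documentation/cachetlb.txt) after
 * the kernel writes to a page cache page through a kernel mapping:
 *
 *	void *addr = kmap(page);
 *	memcpy(addr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */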

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
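
/*
 * Per Documentation/cachetlb.txt, these bracket I/O performed directly
 * on the physical pages behind a vmap()/vmalloc() area.  A sketch:
 *
 *	flush_kernel_vmap_range(vaddr, size);
 *	... I/O that reads the pages (e.g. writing them to disk) ...
 *
 *	... I/O that writes the pages completes ...
 *	invalidate_kernel_vmap_range(vaddr, size);
 *	... now the CPU may safely read the new data through vaddr ...
 */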

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369#endif