/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S; see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
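
/*
 * Illustrative sketch (not part of the original header): a driver that
 * manages coherency by hand would pair the DMA primitives above roughly
 * as below. 'buf' and 'len' are hypothetical; real code should normally
 * use the dma-mapping API, which invokes dmac_map_area()/dmac_unmap_area()
 * on its behalf.
 *
 *	// CPU -> device: write dirty lines back before the device reads
 *	dmac_clean_range(buf, buf + len);
 *	// device -> CPU: discard stale lines before the CPU reads
 *	dmac_inv_range(buf, buf + len);
 */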

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
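
/*
 * For illustration: each cache backend (e.g. arch/arm/mm/cache-v7.S)
 * exports a table with this layout. The real tables are emitted from
 * assembly via the cache function macros; expressed as hypothetical C
 * it would look roughly like:
 *
 *	struct cpu_cache_fns v7_cache_fns = {
 *		.flush_icache_all	= v7_flush_icache_all,
 *		.flush_kern_all		= v7_flush_kern_cache_all,
 *		.coherent_kern_range	= v7_coherent_kern_range,
 *		.dma_flush_range	= v7_dma_flush_range,
 *		// ... remaining ops filled in likewise
 *	};
 */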

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
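
/*
 * A sketch of the usual call site (for illustration): when ptrace pokes
 * another task's text, access_process_vm() copies through the kernel
 * mapping and lets copy_to_user_page() fix up the target's caches:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */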

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
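
/*
 * For illustration: this is the operation behind the ARM-private
 * cacheflush(2) syscall (see do_cache_op() in arch/arm/kernel/traps.c).
 * After emitting code, a userspace JIT would typically issue:
 *
 *	syscall(__ARM_NR_cacheflush, code, code + len, 0);
 */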

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
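
/*
 * Typical sequence (illustrative): a driver or filesystem that writes a
 * page cache page through the kernel mapping flushes it before user
 * space can observe the data through its own mapping:
 *
 *	kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, PAGE_SIZE);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */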

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
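
/*
 * These two exist for callers doing I/O to vmap()ed buffers (see
 * Documentation/cachetlb.txt). An illustrative sequence, with 'vaddr'
 * and 'len' hypothetical:
 *
 *	flush_kernel_vmap_range(vaddr, len);	// before the device reads
 *	// ... device I/O on the underlying pages completes ...
 *	invalidate_kernel_vmap_range(vaddr, len); // before the CPU re-reads
 */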

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
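
/*
 * Usage sketch (illustrative): write-protect one page of a generated
 * table, where 'addr' is a hypothetical page-aligned kernel address
 * backed by small pages:
 *
 *	set_memory_ro(addr, 1);		// forbid writes
 *	set_memory_nx(addr, 1);		// forbid instruction fetch
 */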

#endif