Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * This file is subject to the terms and conditions of the GNU General Public |
| 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. |
| 5 | * |
| 6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle |
| 7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. |
| 8 | */ |
| 9 | #ifndef _ASM_CACHEFLUSH_H |
| 10 | #define _ASM_CACHEFLUSH_H |
| 11 | |
| 12 | /* Keep includes the same across arches. */ |
| 13 | #include <linux/mm.h> |
| 14 | #include <asm/cpu-features.h> |
| 15 | |
| 16 | /* Cache flushing: |
| 17 | * |
| 18 | * - flush_cache_all() flushes entire cache |
| 19 | * - flush_cache_mm(mm) flushes the specified mm context's cache lines |
Ralf Baechle | ec8c044 | 2006-12-12 17:14:57 +0000 | [diff] [blame] | 20 | * - flush_cache_dup_mm(mm) handles cache flushing when forking |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | * - flush_cache_page(mm, vmaddr, pfn) flushes a single page |
| 22 | * - flush_cache_range(vma, start, end) flushes a range of pages |
| 23 | * - flush_icache_range(start, end) flush a range of instructions |
| 24 | * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | * |
| 26 | * MIPS specific flush operations: |
| 27 | * |
| 28 | * - flush_cache_sigtramp() flush signal trampoline |
| 29 | * - flush_icache_all() flush the entire instruction cache |
| 30 | * - flush_data_cache_page() flushes a page from the data cache |
| 31 | */ |
| 32 | extern void (*flush_cache_all)(void); |
| 33 | extern void (*__flush_cache_all)(void); |
| 34 | extern void (*flush_cache_mm)(struct mm_struct *mm); |
Ralf Baechle | ec8c044 | 2006-12-12 17:14:57 +0000 | [diff] [blame] | 35 | #define flush_cache_dup_mm(mm) do { (void) (mm); } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 | extern void (*flush_cache_range)(struct vm_area_struct *vma, |
| 37 | unsigned long start, unsigned long end); |
| 38 | extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); |
| 39 | extern void __flush_dcache_page(struct page *page); |
| 40 | |
Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 41 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 42 | static inline void flush_dcache_page(struct page *page) |
| 43 | { |
Ralf Baechle | 585fa72 | 2006-08-12 16:40:08 +0100 | [diff] [blame] | 44 | if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | __flush_dcache_page(page); |
| 46 | |
| 47 | } |
| 48 | |
| 49 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
| 50 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
| 51 | |
Ralf Baechle | 7575a49 | 2007-03-23 21:36:37 +0000 | [diff] [blame] | 52 | #define ARCH_HAS_FLUSH_ANON_PAGE |
| 53 | extern void __flush_anon_page(struct page *, unsigned long); |
| 54 | static inline void flush_anon_page(struct vm_area_struct *vma, |
| 55 | struct page *page, unsigned long vmaddr) |
| 56 | { |
| 57 | if (cpu_has_dc_aliases && PageAnon(page)) |
| 58 | __flush_anon_page(page, vmaddr); |
| 59 | } |
| 60 | |
/*
 * No-op stub: this header defines no per-page icache flushing work
 * here.  Presumably icache maintenance for newly mapped pages is
 * handled elsewhere on MIPS (e.g. flush_icache_range() or the mmu
 * update paths) — only the empty stub is visible in this file.
 */
static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}
| 65 | |
Atsushi Nemoto | d4264f1 | 2006-01-29 02:27:51 +0900 | [diff] [blame] | 66 | extern void (*flush_icache_range)(unsigned long start, unsigned long end); |
Thomas Bogendoerfer | e0cee3e | 2008-08-04 20:53:57 +0200 | [diff] [blame] | 67 | extern void (*local_flush_icache_range)(unsigned long start, unsigned long end); |
Ralf Baechle | 9c5a3d7 | 2008-04-05 15:13:23 +0100 | [diff] [blame] | 68 | |
| 69 | extern void (*__flush_cache_vmap)(void); |
| 70 | |
| 71 | static inline void flush_cache_vmap(unsigned long start, unsigned long end) |
| 72 | { |
| 73 | if (cpu_has_dc_aliases) |
| 74 | __flush_cache_vmap(); |
| 75 | } |
| 76 | |
| 77 | extern void (*__flush_cache_vunmap)(void); |
| 78 | |
| 79 | static inline void flush_cache_vunmap(unsigned long start, unsigned long end) |
| 80 | { |
| 81 | if (cpu_has_dc_aliases) |
| 82 | __flush_cache_vunmap(); |
| 83 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 84 | |
Ralf Baechle | f8829ca | 2006-10-21 23:17:35 +0100 | [diff] [blame] | 85 | extern void copy_to_user_page(struct vm_area_struct *vma, |
Ralf Baechle | 53de0d4 | 2005-03-18 17:36:42 +0000 | [diff] [blame] | 86 | struct page *page, unsigned long vaddr, void *dst, const void *src, |
Ralf Baechle | f8829ca | 2006-10-21 23:17:35 +0100 | [diff] [blame] | 87 | unsigned long len); |
Ralf Baechle | 53de0d4 | 2005-03-18 17:36:42 +0000 | [diff] [blame] | 88 | |
Ralf Baechle | f8829ca | 2006-10-21 23:17:35 +0100 | [diff] [blame] | 89 | extern void copy_from_user_page(struct vm_area_struct *vma, |
Ralf Baechle | 53de0d4 | 2005-03-18 17:36:42 +0000 | [diff] [blame] | 90 | struct page *page, unsigned long vaddr, void *dst, const void *src, |
Ralf Baechle | f8829ca | 2006-10-21 23:17:35 +0100 | [diff] [blame] | 91 | unsigned long len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 92 | |
| 93 | extern void (*flush_cache_sigtramp)(unsigned long addr); |
| 94 | extern void (*flush_icache_all)(void); |
Ralf Baechle | 7e3bfc7 | 2006-04-05 20:42:04 +0100 | [diff] [blame] | 95 | extern void (*local_flush_data_cache_page)(void * addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 96 | extern void (*flush_data_cache_page)(unsigned long addr); |
| 97 | |
| 98 | /* |
| 99 | * This flag is used to indicate that the page pointed to by a pte |
| 100 | * is dirty and requires cleaning before returning it to the user. |
| 101 | */ |
| 102 | #define PG_dcache_dirty PG_arch_1 |
| 103 | |
| 104 | #define Page_dcache_dirty(page) \ |
| 105 | test_bit(PG_dcache_dirty, &(page)->flags) |
| 106 | #define SetPageDcacheDirty(page) \ |
| 107 | set_bit(PG_dcache_dirty, &(page)->flags) |
| 108 | #define ClearPageDcacheDirty(page) \ |
| 109 | clear_bit(PG_dcache_dirty, &(page)->flags) |
| 110 | |
Thiemo Seufer | ba5187d | 2005-04-25 16:36:23 +0000 | [diff] [blame] | 111 | /* Run kernel code uncached, useful for cache probing functions. */ |
Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 112 | unsigned long run_uncached(void *func); |
Thiemo Seufer | ba5187d | 2005-04-25 16:36:23 +0000 | [diff] [blame] | 113 | |
Ralf Baechle | 7575a49 | 2007-03-23 21:36:37 +0000 | [diff] [blame] | 114 | extern void *kmap_coherent(struct page *page, unsigned long addr); |
Ralf Baechle | eacb9d6 | 2007-04-26 15:46:25 +0100 | [diff] [blame] | 115 | extern void kunmap_coherent(void); |
Ralf Baechle | 7575a49 | 2007-03-23 21:36:37 +0000 | [diff] [blame] | 116 | |
Ralf Baechle | d9cdc901 | 2011-06-17 16:20:28 +0100 | [diff] [blame] | 117 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE |
| 118 | static inline void flush_kernel_dcache_page(struct page *page) |
| 119 | { |
| 120 | BUG_ON(cpu_has_dc_aliases && PageHighMem(page)); |
| 121 | } |
| 122 | |
| 123 | /* |
| 124 | * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a |
| 125 | * cache writeback and invalidate operation. |
| 126 | */ |
| 127 | extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); |
| 128 | |
| 129 | static inline void flush_kernel_vmap_range(void *vaddr, int size) |
| 130 | { |
| 131 | if (cpu_has_dc_aliases) |
| 132 | __flush_kernel_vmap_range((unsigned long) vaddr, size); |
| 133 | } |
| 134 | |
| 135 | static inline void invalidate_kernel_vmap_range(void *vaddr, int size) |
| 136 | { |
| 137 | if (cpu_has_dc_aliases) |
| 138 | __flush_kernel_vmap_range((unsigned long) vaddr, size); |
| 139 | } |
| 140 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 141 | #endif /* _ASM_CACHEFLUSH_H */ |