#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the Intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing struct page. X86 PAT
 * supports three different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC
 * and _PAGE_CACHE_UC_MINUS, plus a fourth state where the page's memory
 * type has not been changed from its default (a value of -1 is used to
 * denote this). Note that we do not support _PAGE_CACHE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags = _PGMT_DEFAULT;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	}

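	/*
	 * Atomically swap in the new memtype bits: other page flags may be
	 * updated concurrently, so retry the cmpxchg until no other writer
	 * has raced with us between the read and the exchange.
	 */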
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
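
/*
 * Example (a minimal sketch; "pg" is an assumed pointer to a tracked RAM
 * page): recording a page as write-combining, querying it back, and
 * returning it to the default (untracked) state. Passing -1 matches no
 * case in the switch above, so the flags are reset to _PGMT_DEFAULT.
 *
 *	set_page_memtype(pg, _PAGE_CACHE_WC);
 *	if (get_page_memtype(pg) == _PAGE_CACHE_WC)
 *		...
 *	set_page_memtype(pg, -1);
 */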

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are left in
 *   any particular state, beyond not violating the rules of the CPU
 *   you have. Do not depend on any effects on other mappings; CPUs
 *   other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
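
/*
 * Example (a minimal sketch; "addr" and "nrpages" are assumed to describe
 * a page-aligned kernel virtual range): write-protecting a range and
 * restoring it when done.
 *
 *	if (set_memory_ro(addr, nrpages))
 *		return -EINVAL;
 *	...
 *	set_memory_rw(addr, nrpages);
 */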

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
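
/*
 * Example (a minimal sketch; "pages" and "count" are assumed): switching a
 * scattered set of pages to write-combining in one call, then restoring
 * write-back before the pages are freed.
 *
 *	if (set_pages_array_wc(pages, count))
 *		goto err;
 *	...
 *	set_pages_array_wb(pages, count);
 */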

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1
 * mapping only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get the
 * struct page * that the old APIs required.
 * To convert these cases, use set_memory_*() on the original virtual
 * address instead of these functions (see the conversion example below
 * the prototypes).
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
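
/*
 * Conversion example (a minimal sketch; "buf" is an assumed kernel
 * virtual address): instead of the deprecated
 *
 *	set_pages_uc(virt_to_page(buf), nrpages);
 *
 * operate on the virtual address directly:
 *
 *	set_memory_uc((unsigned long)buf, nrpages);
 */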

void clflush_cache_range(void *addr, unsigned int size);
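
/*
 * Example (a minimal sketch; "vaddr", "data" and "len" are assumed):
 * flushing the affected cache lines so that CPU writes become visible
 * to a non-coherent observer of the same physical memory.
 *
 *	memcpy(vaddr, data, len);
 *	clflush_cache_range(vaddr, len);
 */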

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */