#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

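/*
 * Since the caches are coherent, copying to or from a user page needs
 * no extra flushing; both helpers below are a plain memcpy().
 */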
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

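/*
 * PG_arch_1 is the architecture-private page flag; x86 PAT reuses it as
 * "WC".  PAGEFLAG(WC, WC) generates the PageWC(), SetPageWC() and
 * ClearPageWC() accessors used below (see <linux/page-flags.h>).
 */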
#define PG_WC		PG_arch_1
PAGEFLAG(WC, WC)

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing struct page.  X86 PAT
 * supports three different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC
 * and _PAGE_CACHE_UC_MINUS, plus a fourth state for pages whose memory
 * type has not been changed from the default (a value of -1 denotes
 * this).  Note that _PAGE_CACHE_UC is not supported here.
 *
 * The flag encoding is:
 *
 *	Uncached  WC	memory type
 *	   0	   0	-1 (default)
 *	   0	   1	_PAGE_CACHE_WC
 *	   1	   0	_PAGE_CACHE_UC_MINUS
 *	   1	   1	_PAGE_CACHE_WB
 *
 * The caller must hold memtype_lock for atomicity.
 */
static inline unsigned long get_page_memtype(struct page *pg)
{
	if (!PageUncached(pg) && !PageWC(pg))
		return -1;
	else if (!PageUncached(pg) && PageWC(pg))
		return _PAGE_CACHE_WC;
	else if (PageUncached(pg) && !PageWC(pg))
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	switch (memtype) {
	case _PAGE_CACHE_WC:
		ClearPageUncached(pg);
		SetPageWC(pg);
		break;
	case _PAGE_CACHE_UC_MINUS:
		SetPageUncached(pg);
		ClearPageWC(pg);
		break;
	case _PAGE_CACHE_WB:
		SetPageUncached(pg);
		SetPageWC(pg);
		break;
	default:
	case -1:
		ClearPageUncached(pg);
		ClearPageWC(pg);
		break;
	}
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
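/*
 * Illustrative sketch (not a real caller in this file): a PAT-side user
 * is expected to take memtype_lock around the flag read-modify-write,
 * roughly:
 *
 *	spin_lock(&memtype_lock);
 *	if (get_page_memtype(page) == -1)
 *		set_page_memtype(page, _PAGE_CACHE_WC);
 *	spin_unlock(&memtype_lock);
 */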

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee anything about the state of mappings other than the
 *   requested one, beyond that they do not violate the rules for the
 *   CPU you have.  Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
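/*
 * Usage sketch (illustrative; "buf" is a hypothetical page-aligned
 * kernel-virtual buffer):
 *
 *	if (set_memory_wc((unsigned long)buf, nrpages))
 *		goto fail;
 *	...
 *	set_memory_wb((unsigned long)buf, nrpages);
 *
 * numpages counts pages, not bytes, and since this API does not restore
 * default attributes on free, the caller should put the range back to
 * its default state before the memory is freed.
 */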

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
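/*
 * The *_array variants apply the attribute change to every entry in a
 * single call; addrinarray is the number of entries in the array.
 */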

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address, do not use these functions.  A conversion sketch
 * follows the declarations below.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
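/*
 * Conversion sketch (illustrative): legacy code such as
 *
 *	set_pages_uc(virt_to_page(addr), nrpages);
 *
 * should operate on the virtual address directly instead:
 *
 *	set_memory_uc((unsigned long)addr, nrpages);
 */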

void clflush_cache_range(void *addr, unsigned int size);

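/*
 * set_kernel_text_rw()/set_kernel_text_ro() below let code that patches
 * kernel text (e.g. the function tracer) temporarily drop the write
 * protection established by mark_rodata_ro().
 */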
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */