#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate the rules for
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

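/*
 * Usage sketch (illustrative only; 'buf' and the surrounding error
 * handling are hypothetical): set_memory_*() operates on whole pages,
 * so the address must be page-aligned and 'numpages' counts pages, not
 * bytes. Temporarily write-protecting one page of a kernel buffer
 * might look like:
 *
 *	int err = set_memory_ro((unsigned long)buf, 1);
 *
 *	if (err)
 *		return err;
 *	...
 *	set_memory_rw((unsigned long)buf, 1);
 */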
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

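/*
 * Usage sketch (illustrative; 'pages' and 'count' are hypothetical):
 * the array variants apply the attribute change to many, possibly
 * non-contiguous, pages in one call, so the cache/TLB flushing cost is
 * paid once rather than per page:
 *
 *	if (set_pages_array_wc(pages, count))
 *		goto out_err;
 *	...
 *	set_pages_array_wb(pages, count);
 */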
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);


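/*
 * Conversion sketch (hypothetical 'vaddr' from vmalloc()): the
 * deprecated call below would change the 1:1 mapping rather than the
 * vmalloc mapping the caller actually uses:
 *
 *	set_pages_ro(vmalloc_to_page(vaddr), 1);
 *
 * Instead, operate on the virtual address directly:
 *
 *	set_memory_ro((unsigned long)vaddr, 1);
 */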
void clflush_cache_range(void *addr, unsigned int size);

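/*
 * Sketch (hypothetical 'dst', 'src' and 'len'): clflush_cache_range()
 * writes back and invalidates the cache lines covering
 * [addr, addr + size), e.g. after updating a buffer that a
 * non-cache-coherent agent will read:
 *
 *	memcpy(dst, src, len);
 *	clflush_cache_range(dst, len);
 */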
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

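/*
 * Sketch (illustrative; this mirrors the pattern used by kernel code
 * patchers such as ftrace): briefly drop the write protection on the
 * kernel text, patch it, then seal it again:
 *
 *	set_kernel_text_rw();
 *	... patch instructions ...
 *	set_kernel_text_ro();
 */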
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#ifdef ARCH_HAS_NOCACHE_UACCESS

/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection fault
	 * before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'. Note that we've
	 * already arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}

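/*
 * Usage sketch (illustrative; 'pmem_dst', 'src' and 'len' are
 * hypothetical): a caller that requires durability first checks
 * __arch_has_wmb_pmem(), then pairs the non-temporal copy with the
 * draining fence:
 *
 *	if (!__arch_has_wmb_pmem())
 *		return -EOPNOTSUPP;
 *	arch_memcpy_to_pmem(pmem_dst, src, len);
 *	arch_wmb_pmem();
 */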
static inline bool __arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
	/*
	 * We require that wmb() be an 'sfence', which is only guaranteed
	 * on 64-bit builds.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
#else
	return false;
#endif
}
#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
extern void arch_wmb_pmem(void);

static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */