#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee anything about mappings other than the requested one,
 *   beyond that they do not violate the coherency rules of the CPU in
 *   the system. Do not depend on any effects on other mappings; CPUs
 *   other than the one you run on may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

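/*
 * Illustrative sketch (not part of this header): a caller wanting to
 * temporarily write-protect a page-aligned kernel buffer might pair
 * set_memory_ro() and set_memory_rw() as below; "buf" and "nr_pages"
 * are hypothetical names. Note that addr is a virtual address in the
 * kernel mapping and numpages counts pages, not bytes.
 *
 *	set_memory_ro((unsigned long)buf, nr_pages);
 *	...
 *	set_memory_rw((unsigned long)buf, nr_pages);
 */
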
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1
 * mapping only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get
 * the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

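/*
 * Conversion sketch for the deprecated set_pages_* calls above,
 * assuming a hypothetical caller that previously did:
 *
 *	set_pages_ro(virt_to_page(addr), numpages);
 *
 * The preferred form operates on the original virtual address directly:
 *
 *	set_memory_ro((unsigned long)addr, numpages);
 */
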
void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#ifdef ARCH_HAS_NOCACHE_UACCESS

/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection
	 * fault before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'. Note that we've
	 * already arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}

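/*
 * Usage sketch (illustrative, not part of this header): per the
 * kernel-doc above, a pmem update is durable only once the copy has
 * been followed by a drain; "pmem_dst", "buf" and "len" are
 * hypothetical names.
 *
 *	arch_memcpy_to_pmem(pmem_dst, buf, len);
 *	arch_wmb_pmem();
 */
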
static inline bool __arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
	/*
	 * We require that wmb() be an 'sfence'; that is only guaranteed
	 * on 64-bit builds.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
#else
	return false;
#endif
}
#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
extern void arch_wmb_pmem(void);

static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */