#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing page struct.  X86 PAT
 * supports three different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC
 * and _PAGE_CACHE_UC_MINUS, plus a fourth state where the page's memory
 * type has not been changed from its default (a value of -1 is used to
 * denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags = _PGMT_DEFAULT;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	}

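	/*
	 * Splice the new memtype bits into page->flags without taking a
	 * lock; retry if any other flag bit changed concurrently.
	 */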
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
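
/*
 * Illustrative sketch only (not part of the original header): a caller on
 * the PAT side could record and query a page's memory type like this.
 * "pg" is a hypothetical struct page pointer.
 *
 *	set_page_memtype(pg, _PAGE_CACHE_WC);
 *	WARN_ON(get_page_memtype(pg) != _PAGE_CACHE_WC);
 */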

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate the rules for
 *   the CPU you have.  Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
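
/*
 * Illustrative sketch only (not part of the original header): write-protect
 * a page-aligned kernel range and restore it later.  "addr" and "nr_pages"
 * are hypothetical values supplied by the caller; error handling is elided.
 *
 *	if (set_memory_ro(addr, nr_pages))
 *		pr_warn("could not mark range read-only\n");
 *	...
 *	set_memory_rw(addr, nr_pages);
 */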

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions.  See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1 mapping
 * only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address instead of these functions (see the illustrative
 * sketch below the declarations).
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
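
/*
 * Illustrative conversion sketch only (not part of the original header), as
 * referenced in the comment above: call set_memory_ro() on the original
 * virtual address instead of going through a struct page.  "vaddr" and
 * "nr_pages" are hypothetical.
 *
 *	Deprecated:	set_pages_ro(virt_to_page(vaddr), nr_pages);
 *	Preferred:	set_memory_ro((unsigned long)vaddr, nr_pages);
 */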

void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */