/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

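/*
 * Illustrative usage sketch (not part of the original file): flushing one
 * hypothetical page-sized buffer after modifying it through an alias
 * mapping. The mb() fences above order the clflush loop against the
 * surrounding loads and stores.
 *
 *	clflush_cache_range(page_address(page), PAGE_SIZE);
 */
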
static void __cpa_flush_all(void *arg)
{
        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize this further and do individual per-page
         * TLB invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64-bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        /*
         * We only need to flush on one CPU: clflush is a MESI-coherent
         * instruction that will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && pte_present(*pte))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(__pa(address), BIOS_BEGIN, BIOS_END))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext, since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
        /* The .rodata section needs to be read-only */
        if (within(address, (unsigned long)__start_rodata,
                   (unsigned long)__end_rodata))
                pgprot_val(forbidden) |= _PAGE_RW;
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

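/*
 * Illustrative example (not from the original source): asking for a
 * writable mapping of an address inside .rodata comes back with the RW
 * bit stripped (when CONFIG_DEBUG_RODATA is enabled):
 *
 *	pgprot_t prot;
 *
 *	prot = static_protections(PAGE_KERNEL,
 *				  (unsigned long)__start_rodata);
 *	(pgprot_val(prot) now has _PAGE_RW cleared)
 */
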
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;
        return pte_offset_kernel(pmd, address);
}

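/*
 * Usage sketch (illustrative, not part of the original file); "addr" is
 * a hypothetical kernel virtual address:
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 * On return, level is PG_LEVEL_4K, PG_LEVEL_2M or PG_LEVEL_NONE; for a
 * large page the returned pointer is really the pmd entry.
 */
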
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                for (page = pgd_list; page; page = (struct page *)page->index) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split-up page table. Important detail here:
         *
         * On Intel, the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual).
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}

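/*
 * Illustrative sketch (not from the original source): after a successful
 * split, looking up the same hypothetical address "addr" again comes back
 * at the 4k level instead of the large-page level:
 *
 *	int level;
 *	pte_t *kpte = lookup_address(addr, &level);	(level == PG_LEVEL_2M)
 *
 *	if (kpte && !split_large_page(kpte, addr))
 *		kpte = lookup_address(addr, &level);	(level == PG_LEVEL_4K)
 */
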
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

#ifdef CONFIG_X86_32
        BUG_ON(pfn > max_low_pfn);
#endif

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        prot = static_protections(prot, address);

        if (level == PG_LEVEL_4K) {
                WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
                set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
        } else {
                /* Clear the PSE bit for the 4k level pages ! */
                pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }
#endif

        if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                err = __change_page_attr(address, pfn, prot);
                if (err)
                        return err;
        }

#ifdef CONFIG_X86_64
        /*
         * Handle the kernel mapping too, which aliases part of
         * lowmem:
         */
        if (__pa(address) < KERNEL_TEXT_SIZE) {
                unsigned long addr2;
                pgprot_t prot2;

                addr2 = __START_KERNEL_map + __pa(address);
                /* Make sure the kernel mappings stay executable */
                prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                err = __change_page_attr(addr2, pfn, prot2);
        }
#endif

        return err;
}

static int __change_page_attr_set_clr(unsigned long addr, int numpages,
                                      pgprot_t mask_set, pgprot_t mask_clr)
{
        pgprot_t new_prot;
        int level;
        pte_t *pte;
        int i, ret;

        for (i = 0; i < numpages; i++) {
                pte = lookup_address(addr, &level);
                if (!pte)
                        return -EINVAL;

                new_prot = pte_pgprot(*pte);

                pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
                pgprot_val(new_prot) |= pgprot_val(mask_set);

                ret = change_page_attr_addr(addr, new_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
        }

        return 0;
}

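/*
 * Illustrative example (hypothetical, not from the original file):
 * setting NX while clearing RW on a single page in one pass:
 *
 *	__change_page_attr_set_clr(addr, 1, __pgprot(_PAGE_NX),
 *				   __pgprot(_PAGE_RW));
 */
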
static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
                                             mask_clr);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, and
         * in the error case, we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages);
        else
                cpa_flush_all();

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

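/*
 * Driver-side usage sketch (hypothetical, not part of the original file):
 * make a page-aligned buffer uncached for device-visible accesses, then
 * restore write-back caching before freeing it. "buf" and its size of
 * four pages are assumptions for illustration:
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);
 *
 *	if (buf && !set_memory_uc((unsigned long)buf, 4)) {
 *		... use the uncached buffer ...
 *		set_memory_wb((unsigned long)buf, 4);
 *	}
 *	free_pages((unsigned long)buf, 2);
 */
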
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

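/*
 * Illustrative sketch (not from the original source): write-protecting a
 * hypothetical page and undoing it with the set_pages_* wrappers:
 *
 *	set_pages_ro(page, 1);
 *	... the page is now mapped read-only in the direct mapping ...
 *	set_pages_rw(page, 1);
 */
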
int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
                                           pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_set(addr, numpages,
                                      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_clear(addr, numpages,
                                        __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet, do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock -> flush only the current CPU:
         */
        __flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif