/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flushtlb;
        unsigned long   pfn;
};

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

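/*
 * Illustrative only: a minimal sketch of how a caller might use
 * clflush_cache_range() to push an updated descriptor out to memory before
 * a device that does not snoop the CPU caches reads it. The structure and
 * field names below are hypothetical, not part of this file:
 *
 *      desc->flags |= MY_DESC_VALID;
 *      clflush_cache_range(desc, sizeof(*desc));
 *
 * The mb() pair inside clflush_cache_range() orders the flushes against
 * surrounding stores, so the caller needs no extra barriers around it.
 */
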
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640KB and 1MB needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext since that is gone later on.
         * On 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a non-existent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}

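/*
 * Illustrative only: the typical caller pattern for lookup_address(). The
 * returned level says whether the pointer refers to a 4K pte or to a
 * large-page pmd/pud entry, and the entry may be not-present, so both have
 * to be checked before dereferencing:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_4K)
 *              ... operate on the 4K mapping ...
 *
 * cpa_flush_range() above and kernel_page_present() below follow the same
 * pattern.
 */
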
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

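/*
 * Worked example for the clipping above, with made-up numbers: assume a
 * 2M mapping (psize = 0x200000) and a request that starts 0x1ff000 into
 * it with cpa->numpages = 16. Then:
 *
 *      nextpage_addr = (address + psize) & pmask  -> start of the next 2M page
 *      numpages      = (nextpage_addr - address) >> PAGE_SHIFT = 1
 *
 * so cpa->numpages is clipped to 1 for this iteration. The loop in
 * __change_page_attr_set_clr() then continues with the remaining 15 pages,
 * which start on the next 2M boundary and may be handled as a whole
 * large page there.
 */
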
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        continue;
                }
                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB                (20 - PAGE_SHIFT)
#define ROUND_MB_GB             ((1 << 10) - 1)
#define SHIFT_MB_GB             10
#define POOL_PAGES_PER_GB       16

void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}

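/*
 * Worked example for the pool sizing above (illustrative numbers, assuming
 * PAGE_SHIFT = 12 so SHIFT_MB = 8): with 4 GiB of RAM, si.totalram is about
 * 1048576 pages; shifting by SHIFT_MB gives 4096 MiB, adding ROUND_MB_GB
 * (1023) and shifting by SHIFT_MB_GB (10) rounds that to 4 GiB, so
 * pool_size becomes 4 * POOL_PAGES_PER_GB = 64 pages. Without
 * CONFIG_DEBUG_PAGEALLOC the pool is kept at a single page.
 */
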
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;
                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable. See section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual.
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else
                pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

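/*
 * Illustrative numbers for the split above: for a 2M page, pfninc stays 1
 * and the PTRS_PER_PTE (512) new 4K ptes cover pfn .. pfn+511 with the
 * original protections. For a 1G page (64-bit only), pfninc becomes
 * PMD_PAGE_SIZE >> PAGE_SHIFT = 512, so the 512 new entries are themselves
 * 2M mappings (hence the _PAGE_PSE bit) covering pfn, pfn+512, ...,
 * pfn+511*512.
 */
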
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return primary ? -EINVAL : 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                printk(KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                WARN_ON(1);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flushtlb have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                cpa->flushtlb = 1;
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn > max_pfn_mapped)
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (!within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

#ifdef CONFIG_X86_64
        if (ret)
                return ret;
        /*
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);
#endif
        return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check whether we are asked to change a feature that is not
         * supported:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it to
         * avoid the wbinvd. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK));
}
EXPORT_SYMBOL(set_memory_wb);

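/*
 * Illustrative only: a minimal, hypothetical driver-side sketch of the
 * intended pairing of set_memory_uc()/set_memory_wb() on a kernel
 * direct-mapping address:
 *
 *      unsigned long buf = __get_free_pages(GFP_KERNEL, 2);    // 4 pages
 *
 *      if (set_memory_uc(buf, 4))
 *              goto error;
 *      ... the device accesses the buffer uncached ...
 *      set_memory_wb(buf, 4);
 *      free_pages(buf, 2);
 *
 * The address should be page aligned; unaligned addresses are rounded
 * down and trigger the WARN_ON_ONCE() in change_page_attr_set_clr().
 */
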
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet, do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock, so flush only the current cpu:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}

#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
        seq_puts(m, "DEBUG_PAGEALLOC\n");
        seq_printf(m, "pool_size     : %lu\n", pool_size);
        seq_printf(m, "pool_pages    : %lu\n", pool_pages);
        seq_printf(m, "pool_low      : %lu\n", pool_low);
        seq_printf(m, "pool_used     : %lu\n", pool_used);
        seq_printf(m, "pool_failed   : %lu\n", pool_failed);

        return 0;
}

static int dpa_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
        .open           = dpa_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

int __init debug_pagealloc_proc_init(void)
{
        struct dentry *de;

        de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
                                 &dpa_fops);
        if (!de)
                return -ENOMEM;

        return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif

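/*
 * The statistics above are exposed as a "debug_pagealloc" file at the
 * debugfs root. Assuming debugfs is mounted in the usual place, they can
 * be read with, for example (sample values are illustrative only):
 *
 *      # cat /sys/kernel/debug/debug_pagealloc
 *      DEBUG_PAGEALLOC
 *      pool_size     : 64
 *      pool_pages    : 64
 *      ...
 */
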
#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif