/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long vaddr;
        pgprot_t mask_set;
        pgprot_t mask_clr;
        int numpages;
        int flushtlb;
        unsigned long pfn;
};
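
/*
 * Illustrative sketch (not part of the original file): a caller such as
 * change_page_attr_set_clr() fills this context once and hands it down,
 * e.g. to set _PAGE_NX on four pages starting at @addr:
 *
 *      struct cpa_data cpa = {
 *              .vaddr     = addr,
 *              .numpages  = 4,
 *              .mask_set  = __pgprot(_PAGE_NX),
 *              .mask_clr  = __pgprot(0),
 *              .flushtlb  = 0,
 *      };
 */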

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size: number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
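
/*
 * Usage sketch (illustrative, assuming a mapped kernel buffer @buf):
 * flush the cachelines backing a single page before handing it to a
 * non-coherent consumer:
 *
 *      clflush_cache_range(buf, PAGE_SIZE);
 *
 * The mb() fences around the clflush loop above keep the unordered
 * clflush instructions from being reordered with surrounding accesses.
 */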

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU: clflush is a
         * MESI-coherent instruction that will cause all other
         * CPUs to flush the same cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         * On 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
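
/*
 * Example of the fixups above (illustrative, @rodata_pfn is a
 * hypothetical pfn inside .rodata): a request to make such a page
 * writable comes back with _PAGE_RW stripped again, while a request to
 * set _PAGE_NX on a pfn aliasing the BIOS area comes back with NX
 * stripped, keeping the area executable:
 *
 *      prot = static_protections(__pgprot(_PAGE_RW), address, rodata_pfn);
 *      // pgprot_val(prot) & _PAGE_RW is now 0
 */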

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a non-existing mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
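
/*
 * Usage sketch (illustrative): find out whether an address is currently
 * backed by a large page before deciding whether it must be split:
 *
 *      unsigned int level;
 *      pte_t *kpte = lookup_address(address, &level);
 *
 *      if (kpte && level != PG_LEVEL_4K)
 *              ; // address is covered by a 2M (or 1G) mapping
 */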

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}
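
/*
 * Worked example for the checks above (illustrative): take a 2M page
 * and a request starting at address = base + 0x1000, one 4k page into
 * the large page. Then nextpage_addr = base + 2M and numpages = 511,
 * so the request can never cover the full large page, and
 * address != nextpage_addr - psize: do_split stays 1 and the caller
 * must split. Only a request with address == base and
 * cpa->numpages >= 512 can be satisfied in place.
 */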

static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        continue;
                }
                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB (20 - PAGE_SHIFT)
#define ROUND_MB_GB ((1 << 10) - 1)
#define SHIFT_MB_GB 10
#define POOL_PAGES_PER_GB 16

void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}
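
/*
 * Worked example of the sizing above (illustrative): with 4k pages and
 * debug_pagealloc enabled on a machine with si.totalram = 131072 pages
 * (512 MiB), SHIFT_MB = 8 gives 512 MiB; adding ROUND_MB_GB and
 * shifting by SHIFT_MB_GB rounds that up to gb = 1, so
 * pool_size = 16 pages.
 */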

static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;
                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable. See section 4.13.2 of Intel 64 and IA-32
         * Architectures Software Developer's Manual.
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else
                pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}
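
/*
 * Note on the split above (illustrative numbers for x86-64): a 2M page
 * is replaced by one page-table page holding PTRS_PER_PTE (512) 4k
 * entries with pfninc = 1; a 1G page becomes 512 2M entries, so pfninc
 * is PMD_PAGE_SIZE >> PAGE_SHIFT = 512 and _PAGE_PSE stays set on each
 * new entry.
 */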

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return primary ? -EINVAL : 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                printk(KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                WARN_ON(1);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE;
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flushtlb have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                cpa->flushtlb = 1;
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn > max_pfn_mapped)
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (!within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

#ifdef CONFIG_X86_64
        if (ret)
                return ret;
        /*
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);
#endif
        return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check, if we are requested to change a feature that is not
         * supported:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC));
}

int set_memory_uc(unsigned long addr, int numpages)
{
        if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
                            _PAGE_CACHE_UC, NULL))
                return -EINVAL;

        return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);
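
/*
 * Usage sketch (illustrative, assuming a page-aligned kernel address
 * @vaddr): mark one page uncached for device access and restore it to
 * write-back afterwards. reserve_memtype()/free_memtype() keep the PAT
 * bookkeeping consistent with the attribute change:
 *
 *      if (set_memory_uc(vaddr, 1))
 *              return -EINVAL;
 *      ...
 *      set_memory_wb(vaddr, 1);
 */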

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK));
}

int set_memory_wb(unsigned long addr, int numpages)
{
        free_memtype(addr, addr + numpages * PAGE_SIZE);

        return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet, do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock -> flush only the current cpu:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}

#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
        seq_puts(m, "DEBUG_PAGEALLOC\n");
        seq_printf(m, "pool_size : %lu\n", pool_size);
        seq_printf(m, "pool_pages : %lu\n", pool_pages);
        seq_printf(m, "pool_low : %lu\n", pool_low);
        seq_printf(m, "pool_used : %lu\n", pool_used);
        seq_printf(m, "pool_failed : %lu\n", pool_failed);

        return 0;
}

static int dpa_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
        .open = dpa_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

int __init debug_pagealloc_proc_init(void)
{
        struct dentry *de;

        de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
                                 &dpa_fops);
        if (!de)
                return -ENOMEM;

        return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif
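
/*
 * Reading the debugfs file above produces output of the form shown by
 * dpa_show() (the values here are illustrative):
 *
 *      DEBUG_PAGEALLOC
 *      pool_size : 16
 *      pool_pages : 16
 *      pool_low : 14
 *      pool_used : 2
 *      pool_failed : 0
 */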

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif