/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

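/*
 * Hand out one zeroed page from the early page-table reservation
 * (table_start..table_top) while the bootmem allocator is not yet
 * available; pages are consumed linearly via table_end.
 */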
static __init void *alloc_low_page(void)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_init_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_init_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

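/* Does this address fall within the kernel image mapping (up to __init_end)? */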
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
						unsigned long start_pfn,
						unsigned long end_pfn,
						int use_pse)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
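/*
 * Set up the page tables backing the persistent kmap area and cache a
 * pointer to its first pte in pkmap_page_table.
 */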
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

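/*
 * Free the highmem pages in [start_pfn, end_pfn) that fall inside this
 * node's active (e820 RAM) regions, making them available to the page
 * allocator.
 */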
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	/* max_low_pfn is 0, we already have early_res support */

	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		if (highmem_pages == -1)
			highmem_pages = max_pfn - MAXMEM_PFN;
		if (highmem_pages + MAXMEM_PFN < max_pfn)
			max_pfn = MAXMEM_PFN + highmem_pages;
		if (highmem_pages + MAXMEM_PFN > max_pfn) {
			printk(KERN_WARNING "only %luMB highmem pages "
				"available, ignoring highmem size of %uMB.\n",
				pages_to_mb(max_pfn - MAXMEM_PFN),
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
					MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING
				"Use a HIGHMEM64G enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
		max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning only 4GB will be used."
				"Use a HIGHMEM64G enabled kernel.\n");
		}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
	} else {
		if (highmem_pages == -1)
			highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is "
				"bigger than pages available (%luMB)!.\n",
				pages_to_mb(highmem_pages),
				pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn - highmem_pages <
			    64*1024*1024/PAGE_SIZE) {
				printk(KERN_ERR "highmem size %uMB results in "
					"smaller than 64MB lowmem, ignoring it.\n",
					pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem"
					" kernel!\n");
#endif
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
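/*
 * Flat (single node) setup: register all memory regions, compute the
 * highmem boundaries and bring up the bootmem allocator.
 */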
void __init initmem_init(unsigned long start_pfn,
				  unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

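/*
 * Tell the core VM how large each zone may grow: ZONE_DMA up to
 * MAX_DMA_ADDRESS, ZONE_NORMAL up to max_low_pfn, and (if configured)
 * ZONE_HIGHMEM up to highend_pfn.
 */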
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
	int i;
	unsigned long bootmap_size, bootmap;
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
		max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO " low ram: %08lx - %08lx\n",
		min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
	printk(KERN_INFO " bootmap %08lx - %08lx\n",
		bootmap, bootmap + bootmap_size);
	for_each_online_node(i)
		free_bootmem_with_active_regions(i, max_low_pfn);
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

	after_init_bootmem = 1;
}

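/*
 * Estimate the worst-case size of the kernel direct-mapping page tables
 * for mappings up to 'end' and reserve a contiguous block for them from
 * the e820 map (table_start..table_top).
 */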
static void __init find_early_table_space(unsigned long end, int use_pse)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = PAGE_ALIGN(puds * sizeof(pud_t));

	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		extra += PMD_SIZE;
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += PAGE_ALIGN(ptes * sizeof(pte_t));

	/* for fixmap */
	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x7000;
	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables>>PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

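/*
 * Set up the kernel direct mapping for [start, end). The unaligned head
 * and tail (and the first 2/4MB) are mapped with 4k pages; the aligned
 * middle may use large pages when PSE is available.
 */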
unsigned long __init_refok init_memory_mapping(unsigned long start,
						unsigned long end)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long start_pfn, end_pfn;
	unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	int use_pse = 0;
#else
	int use_pse = cpu_has_pse;
#endif

	/*
	 * Find space for the kernel direct mapping tables.
	 */
	if (!after_init_bootmem)
		find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	big_page_start = PMD_SIZE;

	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
	} else {
		/* head is not big page alignment ? */
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

	/* big page range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
					     use_pse);

	/* tail is not big page alignment ? */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			kernel_physical_mapping_init(pgd_base, start_pfn,
						     end_pfn, 0);
	}

	early_ioremap_page_table_range_init(pgd_base);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	if (!after_init_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
				table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_init_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
| 970 | void __init paging_init(void) |
| 971 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 | pagetable_init(); |
| 973 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | __flush_tlb_all(); |
| 975 | |
| 976 | kmap_init(); |
Yinghai Lu | 11cd0bc | 2008-06-23 19:51:10 -0700 | [diff] [blame] | 977 | |
| 978 | /* |
| 979 | * NOTE: at this point the bootmem allocator is fully available. |
| 980 | */ |
Yinghai Lu | 11cd0bc | 2008-06-23 19:51:10 -0700 | [diff] [blame] | 981 | sparse_init(); |
| 982 | zone_sizes_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | } |
| 984 | |
| 985 | /* |
| 986 | * Test if the WP bit works in supervisor mode. It isn't supported on 386's |
Dmitri Vorobiev | f7f17a6 | 2008-04-21 00:47:55 +0400 | [diff] [blame] | 987 | * and also on some strange 486's. All 586+'s are OK. This used to involve |
| 988 | * black magic jumps to work around some nasty CPU bugs, but fortunately the |
| 989 | * switch to using exceptions got rid of all that. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 990 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | static void __init test_wp_bit(void) |
| 992 | { |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 993 | printk(KERN_INFO |
| 994 | "Checking if this processor honours the WP bit even in supervisor mode..."); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | |
| 996 | /* Any page-aligned address will do, the test is non-destructive */ |
| 997 | __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); |
| 998 | boot_cpu_data.wp_works_ok = do_test_wp_bit(); |
| 999 | clear_fixmap(FIX_WP_TEST); |
| 1000 | |
| 1001 | if (!boot_cpu_data.wp_works_ok) { |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1002 | printk(KERN_CONT "No.\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | #ifdef CONFIG_X86_WP_WORKS_OK |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1004 | panic( |
| 1005 | "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | #endif |
| 1007 | } else { |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1008 | printk(KERN_CONT "Ok.\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | } |
| 1010 | } |
| 1011 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1012 | static struct kcore_list kcore_mem, kcore_vmalloc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | |
| 1014 | void __init mem_init(void) |
| 1015 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | int codesize, reservedpages, datasize, initsize; |
Yinghai Lu | cc9f7a0 | 2008-06-16 16:11:08 -0700 | [diff] [blame] | 1017 | int tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1018 | |
Jeremy Fitzhardinge | cfb80c9 | 2008-12-16 12:17:36 -0800 | [diff] [blame] | 1019 | pci_iommu_alloc(); |
| 1020 | |
Andy Whitcroft | 05b79bd | 2005-06-23 00:07:57 -0700 | [diff] [blame] | 1021 | #ifdef CONFIG_FLATMEM |
Eric Sesterhenn | 8d8f3cb | 2006-10-03 23:34:58 +0200 | [diff] [blame] | 1022 | BUG_ON(!mem_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | /* this will put all low memory onto the freelists */ |
| 1025 | totalram_pages += free_all_bootmem(); |
| 1026 | |
| 1027 | reservedpages = 0; |
| 1028 | for (tmp = 0; tmp < max_low_pfn; tmp++) |
| 1029 | /* |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1030 | * Only count reserved RAM pages: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | */ |
| 1032 | if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) |
| 1033 | reservedpages++; |
| 1034 | |
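	/*
	 * free_all_bootmem() only covers lowmem; highmem pages are handed
	 * to the page allocator separately here.
	 */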
Yinghai Lu | cc9f7a0 | 2008-06-16 16:11:08 -0700 | [diff] [blame] | 1035 | set_highmem_pages_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 | |
| 1037 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
| 1038 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
| 1039 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
| 1040 | |
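	/*
	 * Register the direct-mapped low memory and the vmalloc range with
	 * /proc/kcore, so kernel virtual memory can be inspected post-boot.
	 */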
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1041 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); |
| 1042 | kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | VMALLOC_END-VMALLOC_START); |
| 1044 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1045 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " |
| 1046 | "%dk reserved, %dk data, %dk init, %ldk highmem)\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
| 1048 | num_physpages << (PAGE_SHIFT-10), |
| 1049 | codesize >> 10, |
| 1050 | reservedpages << (PAGE_SHIFT-10), |
| 1051 | datasize >> 10, |
| 1052 | initsize >> 10, |
| 1053 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) |
| 1054 | ); |
| 1055 | |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1056 | printk(KERN_INFO "virtual kernel memory layout:\n" |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1057 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1058 | #ifdef CONFIG_HIGHMEM |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1059 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1060 | #endif |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1061 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" |
| 1062 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" |
| 1063 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" |
| 1064 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" |
| 1065 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", |
| 1066 | FIXADDR_START, FIXADDR_TOP, |
| 1067 | (FIXADDR_TOP - FIXADDR_START) >> 10, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1068 | |
| 1069 | #ifdef CONFIG_HIGHMEM |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1070 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, |
| 1071 | (LAST_PKMAP*PAGE_SIZE) >> 10, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1072 | #endif |
| 1073 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1074 | VMALLOC_START, VMALLOC_END, |
| 1075 | (VMALLOC_END - VMALLOC_START) >> 20, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1076 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1077 | (unsigned long)__va(0), (unsigned long)high_memory, |
| 1078 | ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1079 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1080 | (unsigned long)&__init_begin, (unsigned long)&__init_end, |
| 1081 | ((unsigned long)&__init_end - |
| 1082 | (unsigned long)&__init_begin) >> 10, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1083 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1084 | (unsigned long)&_etext, (unsigned long)&_edata, |
| 1085 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1086 | |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1087 | (unsigned long)&_text, (unsigned long)&_etext, |
| 1088 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1089 | |
Jan Beulich | beeb419 | 2008-12-16 11:45:56 +0000 | [diff] [blame] | 1090 | /* |
| 1091 | * Check boundaries twice: some fundamental inconsistencies can |
| 1092 | * already be detected at build time. |
| 1093 | */ |
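	/*
	 * __FIXADDR_TOP and high_memory are normally run-time values, so
	 * constant stand-ins are #defined here (and #undef'd below) so that
	 * BUILD_BUG_ON() can evaluate the expressions at compile time.
	 */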
| 1094 | #define __FIXADDR_TOP (-PAGE_SIZE) |
| 1095 | #ifdef CONFIG_HIGHMEM |
| 1096 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); |
| 1097 | BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); |
| 1098 | #endif |
| 1099 | #define high_memory (-128UL << 20) |
| 1100 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); |
| 1101 | #undef high_memory |
| 1102 | #undef __FIXADDR_TOP |
| 1103 | |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1104 | #ifdef CONFIG_HIGHMEM |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1105 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); |
| 1106 | BUG_ON(VMALLOC_END > PKMAP_BASE); |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1107 | #endif |
Jan Beulich | beeb419 | 2008-12-16 11:45:56 +0000 | [diff] [blame] | 1108 | BUG_ON(VMALLOC_START >= VMALLOC_END); |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1109 | BUG_ON((unsigned long)high_memory > VMALLOC_START); |
Jeremy Fitzhardinge | 052e799 | 2006-09-25 23:32:25 -0700 | [diff] [blame] | 1110 | |
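	/*
	 * wp_works_ok presumably starts out at -1 ("unknown"), so the probe
	 * below only runs if nothing has determined the answer already.
	 */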
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | if (boot_cpu_data.wp_works_ok < 0) |
| 1112 | test_wp_bit(); |
| 1113 | |
Hugh Dickins | 61165d7 | 2008-05-13 14:26:57 +0100 | [diff] [blame] | 1114 | save_pg_dir(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | zap_low_mappings(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | } |
| 1117 | |
KAMEZAWA Hiroyuki | ad8f579 | 2006-05-20 15:00:03 -0700 | [diff] [blame] | 1118 | #ifdef CONFIG_MEMORY_HOTPLUG |
Yasunori Goto | bc02af9 | 2006-06-27 02:53:30 -0700 | [diff] [blame] | 1119 | int arch_add_memory(int nid, u64 start, u64 size) |
Dave Hansen | 05039b9 | 2005-10-29 18:16:57 -0700 | [diff] [blame] | 1120 | { |
Yasunori Goto | 7c7e942 | 2006-12-22 01:11:13 -0800 | [diff] [blame] | 1121 | struct pglist_data *pgdata = NODE_DATA(nid); |
Christoph Lameter | 776ed98 | 2006-09-25 23:31:09 -0700 | [diff] [blame] | 1122 | struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; |
Dave Hansen | 05039b9 | 2005-10-29 18:16:57 -0700 | [diff] [blame] | 1123 | unsigned long start_pfn = start >> PAGE_SHIFT; |
| 1124 | unsigned long nr_pages = size >> PAGE_SHIFT; |
| 1125 | |
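	/*
	 * On 32-bit, hot-added memory only ever goes into ZONE_HIGHMEM; the
	 * lowmem direct mapping is presumably not grown at run time.
	 */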
Gary Hade | c04fc58 | 2009-01-06 14:39:14 -0800 | [diff] [blame] | 1126 | return __add_pages(nid, zone, start_pfn, nr_pages); |
Dave Hansen | 05039b9 | 2005-10-29 18:16:57 -0700 | [diff] [blame] | 1127 | } |
Andi Kleen | 9d99aaa | 2006-04-07 19:49:15 +0200 | [diff] [blame] | 1128 | #endif |
Dave Hansen | 05039b9 | 2005-10-29 18:16:57 -0700 | [diff] [blame] | 1129 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | /* |
| 1131 | * This function cannot be __init, since exceptions don't work in that |
| 1132 | * section. Put this after the callers, so that it cannot be inlined. |
| 1133 | */ |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1134 | static noinline int do_test_wp_bit(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | { |
| 1136 | char tmp_reg; |
| 1137 | int flag; |
| 1138 | |
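	/*
	 * flag starts out as 1 (via the "2" (1) input constraint).  The
	 * first movb reads the read-only FIX_WP_TEST page, and the movb at
	 * 1: tries to write it back.  If the CPU honours WP in supervisor
	 * mode, that write faults and the exception fixup resumes at 2:,
	 * leaving flag == 1.  If the write silently succeeds, the xorl
	 * clears flag to 0.
	 */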
| 1139 | __asm__ __volatile__( |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1140 | " movb %0, %1 \n" |
| 1141 | "1: movb %1, %0 \n" |
| 1142 | " xorl %2, %2 \n" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | "2: \n" |
H. Peter Anvin | f832ff1 | 2008-02-04 16:47:58 +0100 | [diff] [blame] | 1144 | _ASM_EXTABLE(1b,2b) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), |
| 1146 | "=q" (tmp_reg), |
| 1147 | "=r" (flag) |
| 1148 | :"2" (1) |
| 1149 | :"memory"); |
Ingo Molnar | 8550eb9 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1150 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | return flag; |
| 1152 | } |
| 1153 | |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1154 | #ifdef CONFIG_DEBUG_RODATA |
Arjan van de Ven | edeed30 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1155 | const int rodata_test_data = 0xC3; |
| 1156 | EXPORT_SYMBOL_GPL(rodata_test_data); |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1157 | |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1158 | void mark_rodata_ro(void) |
| 1159 | { |
Jan Beulich | 6fb1475 | 2007-05-02 19:27:10 +0200 | [diff] [blame] | 1160 | unsigned long start = PFN_ALIGN(_text); |
| 1161 | unsigned long size = PFN_ALIGN(_etext) - start; |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1162 | |
Steven Rostedt | 8f0f996 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 1163 | #ifndef CONFIG_DYNAMIC_FTRACE |
| 1164 | /* Dynamic tracing modifies the kernel text section */ |
Mathieu Desnoyers | 4e4eee0 | 2008-02-02 15:42:20 -0500 | [diff] [blame] | 1165 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
| 1166 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", |
| 1167 | size >> 10); |
Andi Kleen | 0c42f39 | 2008-01-30 13:33:42 +0100 | [diff] [blame] | 1168 | |
| 1169 | #ifdef CONFIG_CPA_DEBUG |
Mathieu Desnoyers | 4e4eee0 | 2008-02-02 15:42:20 -0500 | [diff] [blame] | 1170 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", |
| 1171 | start, start+size); |
| 1172 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); |
Andi Kleen | 0c42f39 | 2008-01-30 13:33:42 +0100 | [diff] [blame] | 1173 | |
Mathieu Desnoyers | 4e4eee0 | 2008-02-02 15:42:20 -0500 | [diff] [blame] | 1174 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
| 1175 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); |
Linus Torvalds | 602033e | 2007-07-26 12:07:21 -0700 | [diff] [blame] | 1176 | #endif |
Steven Rostedt | 8f0f996 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 1177 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 1178 | |
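	/*
	 * Move past the text and write-protect the read-only data as well,
	 * i.e. everything from PFN_ALIGN(_etext) up to __end_rodata.
	 */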
Jan Beulich | 6fb1475 | 2007-05-02 19:27:10 +0200 | [diff] [blame] | 1179 | start += size; |
| 1180 | size = (unsigned long)__end_rodata - start; |
Arjan van de Ven | 6d238cc | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1181 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1182 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
| 1183 | size >> 10); |
Arjan van de Ven | edeed30 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1184 | rodata_test(); |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1185 | |
Andi Kleen | 0c42f39 | 2008-01-30 13:33:42 +0100 | [diff] [blame] | 1186 | #ifdef CONFIG_CPA_DEBUG |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1187 | printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); |
Arjan van de Ven | 6d238cc | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1188 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); |
Andi Kleen | 0c42f39 | 2008-01-30 13:33:42 +0100 | [diff] [blame] | 1189 | |
Ingo Molnar | d7d119d | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1190 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
Arjan van de Ven | 6d238cc | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1191 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
Andi Kleen | 0c42f39 | 2008-01-30 13:33:42 +0100 | [diff] [blame] | 1192 | #endif |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1193 | } |
| 1194 | #endif |
| 1195 | |
Gerd Hoffmann | 9a0b581 | 2006-03-23 02:59:32 -0800 | [diff] [blame] | 1196 | void free_init_pages(char *what, unsigned long begin, unsigned long end) |
| 1197 | { |
Ingo Molnar | ee01f11 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1198 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 1199 | /* |
| 1200 | * If debugging page accesses, do not free this memory; instead |
| 1201 | * mark the pages not present - any buggy init-section access |
| 1202 | * will then create a kernel page fault: |
| 1203 | */ |
| 1204 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", |
| 1205 | begin, PAGE_ALIGN(end)); |
| 1206 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); |
| 1207 | #else |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1208 | unsigned long addr; |
| 1209 | |
Arjan van de Ven | 3c1df68 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1210 | /* |
| 1211 | * We just marked the kernel text read-only above; now that we |
| 1212 | * are going to free part of it, we need to make it writable |
| 1213 | * first. |
| 1214 | */ |
| 1215 | set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); |
| 1216 | |
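	/*
	 * For each page: clear the Reserved bit, reset its refcount to one,
	 * poison the old contents and hand it back to the page allocator.
	 */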
Gerd Hoffmann | 9a0b581 | 2006-03-23 02:59:32 -0800 | [diff] [blame] | 1217 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
Linus Torvalds | e3ebadd | 2007-05-07 08:44:24 -0700 | [diff] [blame] | 1218 | ClearPageReserved(virt_to_page(addr)); |
| 1219 | init_page_count(virt_to_page(addr)); |
| 1220 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
| 1221 | free_page(addr); |
Gerd Hoffmann | 9a0b581 | 2006-03-23 02:59:32 -0800 | [diff] [blame] | 1222 | totalram_pages++; |
| 1223 | } |
Jan Beulich | 6fb1475 | 2007-05-02 19:27:10 +0200 | [diff] [blame] | 1224 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); |
Ingo Molnar | ee01f11 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1225 | #endif |
Gerd Hoffmann | 9a0b581 | 2006-03-23 02:59:32 -0800 | [diff] [blame] | 1226 | } |
| 1227 | |
| 1228 | void free_initmem(void) |
| 1229 | { |
| 1230 | free_init_pages("unused kernel memory", |
Linus Torvalds | e3ebadd | 2007-05-07 08:44:24 -0700 | [diff] [blame] | 1231 | (unsigned long)(&__init_begin), |
| 1232 | (unsigned long)(&__init_end)); |
Gerd Hoffmann | 9a0b581 | 2006-03-23 02:59:32 -0800 | [diff] [blame] | 1233 | } |
Arjan van de Ven | 63aaf30 | 2006-01-06 00:12:02 -0800 | [diff] [blame] | 1234 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | #ifdef CONFIG_BLK_DEV_INITRD |
| 1236 | void free_initrd_mem(unsigned long start, unsigned long end) |
| 1237 | { |
Linus Torvalds | e3ebadd | 2007-05-07 08:44:24 -0700 | [diff] [blame] | 1238 | free_init_pages("initrd memory", start, end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 | } |
| 1240 | #endif |
Yinghai Lu | d2dbf34 | 2008-06-13 02:00:56 -0700 | [diff] [blame] | 1241 | |
| 1242 | int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, |
| 1243 | int flags) |
| 1244 | { |
| 1245 | return reserve_bootmem(phys, len, flags); |
| 1246 | } |
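
/*
 * A hypothetical caller (the address below is made up), reserving one
 * firmware-owned page so the bootmem allocator never hands it out:
 *
 *	if (reserve_bootmem_generic(0x9f000, PAGE_SIZE, BOOTMEM_DEFAULT))
 *		printk(KERN_WARNING "could not reserve firmware page\n");
 */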