#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */

unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

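/*
 * Estimate the worst-case size of the kernel direct-mapping page tables
 * needed to map [0, end) and find a physically contiguous buffer for them
 * below max_pfn_mapped; the buffer is tracked in pfn units by
 * pgt_buf_start/pgt_buf_end/pgt_buf_top.
 */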
static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
	phys_addr_t base;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif
	good_end = max_pfn_mapped << PAGE_SHIFT;

	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
	if (!base)
		panic("Cannot find space for the kernel page tables");

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}

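/*
 * Native hook for reserving the page-table buffer pages that were actually
 * used; it can be overridden via x86_init.mapping.pagetable_reserve (see the
 * comment in init_memory_mapping() about what Xen does in addition).
 */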
void __init native_pagetable_reserve(u64 start, u64 end)
{
	memblock_reserve(start, end - start);
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

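/*
 * Record a [start_pfn, end_pfn) range and its page size mask in mr[] and
 * return the updated number of ranges.
 */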
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

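	/*
	 * Split [start, end) into at most NR_RANGE_MR pieces: head and tail
	 * pieces that are not big-page aligned are mapped with 4k pages,
	 * while the aligned middle can use 2M (and, on 64-bit, 1G) pages.
	 */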
	/* head chunk, if the start is not big-page aligned */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail chunk that is not 1G aligned */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail chunk that is not 2M aligned */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the node local to the
	 * memory being mapped. Unfortunately this is currently done before
	 * the nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

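	/* Build the actual direct mapping for each range with its page sizes. */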
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	/*
	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
	 * so that they can be reused for other purposes.
	 *
	 * On native it just means calling memblock_reserve, on Xen it also
	 * means marking RW the pagetable pages that we allocated before
	 * but that haven't been used.
	 *
	 * In fact on Xen we mark RO the whole range pgt_buf_start -
	 * pgt_buf_top, because we have to make sure that when
	 * init_memory_mapping reaches the pagetable pages area, it maps
	 * RO all the pagetable pages, including the ones that are beyond
	 * pgt_buf_end at that time.
	 */
	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
				PFN_PHYS(pgt_buf_end));

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X, dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

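/*
 * Free a range of init memory: un-reserve the pages, poison them and hand
 * them back to the page allocator.  With CONFIG_DEBUG_PAGEALLOC the pages
 * are unmapped instead of freed, so any late access faults.
 */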
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * then create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, end);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read-only above; now that
	 * we are going to free part of it, we need to make it
	 * writable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

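/* Free the kernel's __init sections back to the page allocator. */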
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.  The
	 * partial page at the end has already been reserved in
	 *  - i386_start_kernel()
	 *  - x86_64_start_kernel()
	 *  - relocate_initrd()
	 * so it is safe to PAGE_ALIGN(end) here and free that partial page
	 * as well.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif

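/*
 * Report the highest pfn of each memory zone to the core VM and let
 * free_area_init_nodes() build the zone data structures.
 */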
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}