/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

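/*
 * Minimal bump allocator used while the early page tables are being
 * built, before the bootmem allocator is available: hands out zeroed
 * pages from the [table_start, table_top) window reserved by
 * find_early_table_space() below.
 */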
static __init void *alloc_low_page(void)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_init_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
					(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

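/*
 * Ensure that the pte pages covering the kmap fixmap range come from
 * the contiguous [table_start, table_end) window; if an early fixmap
 * allocation put one elsewhere, copy it into a freshly allocated page.
 */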
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_init_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

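/*
 * Everything from PAGE_OFFSET up to __init_end is treated as kernel
 * text here, so those pages keep execute permission in the mappings
 * created below.
 */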
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
						unsigned long start_pfn,
						unsigned long end_pfn,
						int use_pse)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S.
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well; these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

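/*
 * Cached by kmap_init() below and used by the atomic kmap code to map
 * highmem pages through the fixmap area.
 */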
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

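/*
 * The pfn range handed to add_highpages_work_fn() below, which frees
 * every valid highmem page in the intersection of each active region
 * with this range.
 */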
struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

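/*
 * nx_enabled reports whether NX (Execute Disable) protection is
 * active; it is set by set_nx() below on PAE kernels when the CPU
 * supports the feature.
 */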
int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
	max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (max_low_pfn > MAXMEM_PFN)
		highmem_pfn_init();
	else
		lowmem_pfn_init();
}

Yinghai Lub2ac82a2008-06-22 02:45:39 -0700770#ifndef CONFIG_NEED_MULTIPLE_NODES
Yinghai Lu2ec65f82008-06-23 03:05:30 -0700771void __init initmem_init(unsigned long start_pfn,
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700772 unsigned long end_pfn)
773{
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700774#ifdef CONFIG_HIGHMEM
775 highstart_pfn = highend_pfn = max_pfn;
776 if (max_pfn > max_low_pfn)
777 highstart_pfn = max_low_pfn;
778 memory_present(0, 0, highend_pfn);
Yinghai Lucb95a132008-07-02 00:31:02 -0700779 e820_register_active_regions(0, 0, highend_pfn);
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700780 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
781 pages_to_mb(highend_pfn - highstart_pfn));
782 num_physpages = highend_pfn;
783 high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
784#else
785 memory_present(0, 0, max_low_pfn);
Yinghai Lucb95a132008-07-02 00:31:02 -0700786 e820_register_active_regions(0, 0, max_low_pfn);
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700787 num_physpages = max_low_pfn;
788 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
789#endif
790#ifdef CONFIG_FLATMEM
791 max_mapnr = num_physpages;
792#endif
793 printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
794 pages_to_mb(max_low_pfn));
795
796 setup_bootmem_allocator();
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700797}
Yinghai Lucb95a132008-07-02 00:31:02 -0700798#endif /* !CONFIG_NEED_MULTIPLE_NODES */
Yinghai Lub2ac82a2008-06-22 02:45:39 -0700799
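/*
 * Tell the zone allocator the highest pfn of each zone: DMA below
 * MAX_DMA_ADDRESS, NORMAL up to max_low_pfn, and (if configured)
 * HIGHMEM up to highend_pfn.
 */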
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
	int i;
	unsigned long bootmap_size, bootmap;
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: %08lx - %08lx\n",
		 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  bootmap %08lx - %08lx\n",
		 bootmap, bootmap + bootmap_size);
	for_each_online_node(i)
		free_bootmem_with_active_regions(i, max_low_pfn);
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

	after_init_bootmem = 1;
}

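/*
 * Compute a worst-case size for the early page tables needed to map
 * [0, end) plus the fixmap, and reserve a physically contiguous
 * window for them from the e820 map; alloc_low_page() then hands out
 * pages from this window.
 */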
static void __init find_early_table_space(unsigned long end, int use_pse)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = PAGE_ALIGN(puds * sizeof(pud_t));

	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		extra += PMD_SIZE;
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += PAGE_ALIGN(ptes * sizeof(pte_t));

	/* for fixmap */
	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x7000;
	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables>>PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

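/*
 * Set up the kernel's direct mapping for [start, end): an unaligned
 * head and tail are mapped with 4k pages, the PMD-aligned middle with
 * large pages when PSE is usable. Returns the first pfn past the
 * mapped range.
 */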
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long start_pfn, end_pfn;
	unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	int use_pse = 0;
#else
	int use_pse = cpu_has_pse;
#endif

	/*
	 * Find space for the kernel direct mapping tables.
	 */
	if (!after_init_bootmem)
		find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	big_page_start = PMD_SIZE;

	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
	} else {
		/* head is not big-page aligned? */
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

	/* big page range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
					     use_pse);

	/* tail is not big-page aligned? */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			kernel_physical_mapping_init(pgd_base, start_pfn,
						     end_pfn, 0);
	}

	early_ioremap_page_table_range_init(pgd_base);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	if (!after_init_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
			      table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_init_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

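/* /proc/kcore entries covering lowmem and the vmalloc area */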
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
		"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

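/*
 * Make the kernel text (unless dynamic ftrace needs to patch it) and
 * the read-only data section actually read-only in the page tables.
 */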
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

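/*
 * Free the [begin, end) range page by page, or, with
 * CONFIG_DEBUG_PAGEALLOC, unmap it so that stale init-section
 * accesses fault instead of silently reading freed memory.
 */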
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make it
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}