/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

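/*
 * Allocate one zeroed page for building page tables.  Once the buddy
 * allocator is up (after_bootmem) take it from there; before that, fall
 * back to the bootmem allocator.
 */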
static void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

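/*
 * Install a single kernel mapping from vaddr to phys, allocating any
 * intermediate pud/pmd/pte tables that are still missing on the way down.
 */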
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

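/*
 * Grab a zeroed page for early page-table construction.  After bootmem it
 * is a plain get_zeroed_page(); before that, the page is carved out at
 * table_end and made accessible through one of the temporary 2MB mappings
 * above, since the direct mapping does not exist yet.
 */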
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

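/*
 * Fill one pmd page with 2MB (PSE) kernel mappings for the direct
 * mapping, starting at 'address'; entries beyond 'end' are cleared.
 */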
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address > end) {
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

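/*
 * Late (memory-hotplug) path: populate the pmd page below an existing pud
 * entry.  Only acts if the pmd is still empty.
 */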
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

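/*
 * Estimate the worst-case number of pud/pmd pages needed to map memory up
 * to 'end' and reserve a contiguous e820 range for them, starting at 32KB
 * to stay clear of the lowest real-mode/BIOS pages.
 */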
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset_k(pgd, __PAGE_OFFSET);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

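/*
 * Remove the low identity mapping left over from boot.  The BP clears its
 * pgd entry directly; APs simply switch cr3 to init_level4_pgt, where the
 * low mappings are already zapped.
 */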
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing a local tlb flush.
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account for them too. */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 * These are only for non-NUMA machines right now.
 */
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

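/*
 * Hot-add a memory range to node 0.  MAX_NR_ZONES-2 picks ZONE_NORMAL
 * (the zone just below ZONE_HIGHMEM, which x86-64 does not use); the new
 * range must also be added to the kernel direct mapping.
 */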
int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size - 1));

	return ret;
error:
	printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#endif

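/*
 * Final memory setup: release all bootmem pages to the buddy allocator,
 * register the /proc/kcore ranges and print the memory summary.
 */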
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
	no_iommu_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt,
	 * except for the low identity mappings, which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

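/* Free and poison the .init sections once boot is complete. */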
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison freed init pages with int3 (0xcc) to catch stale references. */
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n",
		(__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
		(&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

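/*
 * Check whether a kernel virtual address is backed by a present page.
 * The initial shift test rejects non-canonical addresses: for a canonical
 * address the bits above __VIRTUAL_MASK_SHIFT are all 0 or all 1, so
 * 'above' must be 0 or -1.
 */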
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745}