/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

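/*
 * Dispatch table for the DMA mapping API; normally pointed at a concrete
 * implementation (GART IOMMU, swiotlb or nommu) during PCI/IOMMU setup.
 */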
struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

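/*
 * Set once mem_init() has released the bootmem allocator; the allocation
 * helpers below use it to switch from bootmem to the page allocator.
 */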
int after_bootmem;

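/*
 * Get a zeroed page for a kernel page table: from bootmem during early
 * boot, from the page allocator (GFP_ATOMIC) once it is up.
 */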
static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

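/*
 * Map a single kernel page at vaddr to phys with the given protection,
 * filling in any missing intermediate page-table levels.
 */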
static __init void set_pte_phys(unsigned long vaddr,
			unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

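/*
 * PFN window that alloc_low_page() hands out before bootmem is running;
 * sized and positioned by find_early_table_space() below.
 */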
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

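/*
 * Return a zeroed page for page-table use, along with its physical
 * address. After bootmem this is an ordinary GFP_ATOMIC page; before
 * that, the next pfn of the early table area is mapped through one of
 * the temporary 2MB mappings above and must be released again with
 * unmap_low_page(*index).
 */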
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

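/*
 * Early fixed mapping of a physical range (e.g. the SMBIOS/DMI area)
 * through the temporary boot PMDs; only one mapping can be live at a
 * time and it must be undone with early_iounmap().
 */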
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

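/*
 * Fill one PMD page with 2MB kernel mappings covering [address, end).
 * At boot, slots past the end of the range are explicitly cleared.
 */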
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

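/* Memory-hotplug path: populate a PMD only if it is still empty. */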
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

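/*
 * Fill a PUD page with mappings for [address, end), allocating one PMD
 * page per populated PUD_SIZE slot. At boot, ranges not covered by any
 * e820 entry are left unmapped.
 */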
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

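/*
 * Estimate the worst-case size of the direct-mapping page tables for
 * [0, end) and grab a contiguous region for them from the e820 map.
 */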
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

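/*
 * Tear down the low identity mapping left over from early boot: clear
 * the first kernel PGD slot on the boot CPU, and point cr3 at
 * init_level4_pgt on the APs.
 */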
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is meant to find the node id for a
 * physical address when memory is added via the sysfs probe interface.
 * If ACPI notifies a hot-add event, the node id can be found by searching
 * the DSDT, but the probe interface carries no node id, so return node 0
 * for now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size -1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

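/*
 * Release the pages in [begin, end) back to the allocator, poisoning
 * them first so that stale uses of freed init memory are detectable.
 */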
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

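/*
 * Check that a kernel virtual address is canonical and backed by a
 * present page-table entry (used e.g. by the /proc/kcore read path).
 */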
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}