/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

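/*
 * Dump a summary of memory state to the console: global free areas and
 * swap, plus per-node counts of total, reserved, swap-cached and shared
 * pages.
 */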
void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%ld pages of RAM\n", total);
        printk(KERN_INFO "%ld reserved pages\n", reserved);
        printk(KERN_INFO "%ld pages shared\n", shared);
        printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

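/*
 * Allocate one page-aligned, zeroed page for building kernel page tables:
 * from the page allocator once bootmem has been retired, from bootmem
 * before that.
 */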
static __init void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

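/*
 * Map a single page: install a pte for @phys at @vaddr with protection
 * @prot, allocating any missing intermediate (pud/pmd/pte) tables via
 * spp_getpage() on the way down.
 */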
static __init void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

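/*
 * Hand out a zeroed page for early page-table construction.  After
 * bootmem is up this is simply get_zeroed_page(); before that, the next
 * free pfn at table_end is claimed and made addressable through one of
 * the temporary 2MB mappings above.  *index tells unmap_low_page() which
 * temporary slot to release.
 */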
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        memset(adr, 0, PAGE_SIZE);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(int i)
{
        struct temp_map *ti;

        if (after_bootmem)
                return;

        ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

        /* actually usually some more */
        if (size >= LARGE_PAGE_SIZE) {
                return NULL;
        }
        set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        map += LARGE_PAGE_SIZE;
        set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        __flush_tlb();
        return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
        if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
                printk("early_iounmap: bad address %p\n", addr);
        set_pmd(temp_mappings[0].pmd, __pmd(0));
        set_pmd(temp_mappings[1].pmd, __pmd(0));
        __flush_tlb();
}

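/*
 * Fill one pmd-level table with 2MB kernel mappings from @address up to
 * @end.  Already-populated entries are left untouched; at boot, entries
 * past @end are explicitly zeroed.
 */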
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long entry;
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}

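/*
 * Populate one pud-level table for @addr..@end, allocating pmd pages as
 * needed.  At boot, pud slots whose range contains nothing in the e820
 * map are cleared rather than populated; already-present slots are only
 * updated at the pmd level.
 */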
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                int map;
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(map);
        }
        __flush_tlb();
}

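/*
 * Estimate the space the direct-mapping page tables will need for memory
 * up to @end and find a free e820 area for them; table_start/table_end
 * then track the pfns that alloc_low_page() hands out.
 */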
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                     end, table_start << PAGE_SHIFT,
                     (table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        if (!after_bootmem)
                asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
}

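/*
 * Tear down the low identity mapping that head.S used for booting.  The
 * boot CPU clears its pgd entry in place; APs just switch cr3 to
 * init_level4_pgt, which never had the low mappings.
 */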
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, (start + size - 1));

        ret = __add_pages(zone, start_pfn, nr_pages);
        if (ret)
                goto error;

        return ret;
error:
        printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#ifndef CONFIG_ACPI_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
        int err = -EIO;
        unsigned long pfn;
        unsigned long total = 0, mem = 0;
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (pfn_valid(pfn)) {
                        online_page(pfn_to_page(pfn));
                        err = 0;
                        mem++;
                }
                total++;
        }
        if (!err) {
                z->spanned_pages += total;
                z->present_pages += mem;
                z->zone_pgdat->node_spanned_pages += total;
                z->zone_pgdat->node_present_pages += mem;
        }
        return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                        absent_pages_in_range(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               end_pfn << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

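/*
 * Give a range of init pages back to the page allocator, poisoning them
 * first so that stale uses of freed init memory show up as recognizable
 * garbage.
 */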
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                       POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
}

void free_initmem(void)
{
        memset(__initdata_begin, POISON_FREE_INITDATA,
               __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)__start_rodata;

        for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
               (__end_rodata - __start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

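/*
 * Reserve a range with bootmem, tolerating out-of-range addresses that
 * kdump kernels can see when reserving firmware tables, and accounting
 * any reservation below MAX_DMA_PFN against the ZONE_DMA reserve.
 */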
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;
        if (pfn >= end_pfn) {
                /* This can happen with kdump kernels when accessing firmware
                   tables. */
                if (pfn < end_pfn_map)
                        return;
                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                       phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}

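/*
 * Walk the kernel page tables to decide whether @addr is a canonical,
 * currently mapped kernel address backed by a valid pfn.
 */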
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774}