/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
 * in physical memory, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%ld pages of RAM\n", total);
        printk(KERN_INFO "%ld reserved pages\n", reserved);
        printk(KERN_INFO "%ld pages shared\n", shared);
        printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

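/*
 * Allocate a page for a kernel page table.  Before mem_init() has run
 * this must come from the bootmem allocator; afterwards the normal
 * page allocator is available.
 */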
static __init void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

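/*
 * Install a single kernel PTE mapping vaddr to phys with the given
 * protection, allocating intermediate page-table levels as needed.
 */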
static __init void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

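/*
 * Hand out a zeroed page for an early page table and report its physical
 * address.  Before bootmem is up this takes the next free pfn past the
 * kernel (table_end) and reaches it through one of the temporary 2MB
 * mappings above, since the direct mapping being built does not exist yet.
 */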
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        memset(adr, 0, PAGE_SIZE);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

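/* Release the temporary mapping set up by alloc_low_page(). */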
static __meminit void unmap_low_page(int i)
{
        struct temp_map *ti;

        if (after_bootmem)
                return;

        ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

        /* the two large-page mappings below usually cover somewhat more */
        if (size >= LARGE_PAGE_SIZE) {
                printk("SMBIOS area too long %lu\n", size);
                return NULL;
        }
        set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        map += LARGE_PAGE_SIZE;
        set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        __flush_tlb();
        return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
        if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
                printk("early_iounmap: bad address %p\n", addr);
        set_pmd(temp_mappings[0].pmd, __pmd(0));
        set_pmd(temp_mappings[1].pmd, __pmd(0));
        __flush_tlb();
}

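/*
 * Fill a PMD page with 2MB mappings for the range address..end, leaving
 * already-present entries untouched.  During the boot-time mapping, the
 * entries past end are cleared.
 */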
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long entry;
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}

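/*
 * Fill a PUD page for the range addr..end, allocating one PMD page per
 * 1GB slot and skipping slots that the e820 map shows contain no memory.
 */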
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                int map;
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(map);
        }
        __flush_tlb();
}

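/*
 * Estimate the worst-case space the direct-mapping page tables for
 * memory up to end will need, and reserve a block of that size from
 * the e820 map.
 */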
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the memory
         * mapped. Unfortunately this is done currently before the nodes are
         * discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        if (!after_bootmem)
                asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
}

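/*
 * Remove the boot-time identity mapping of low memory.  The BP clears
 * the first pgd entry in place; APs just switch to init_level4_pgt,
 * which never contained the low mappings.
 */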
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        memory_present(0, 0, end_pfn);
        sparse_init();
        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() should find the node id of memory
 * hot-added through the sysfs probe interface. When ACPI notifies a
 * hot-add event, the node id can be found by searching the DSDT, but
 * the probe interface carries no node id, so return 0 for now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(zone, start_pfn, nr_pages);
        if (ret)
                goto error;

        init_memory_mapping(start, (start + size -1));

        return ret;
error:
        printk("%s: Problem encountered in __add_pages!\n", __func__);
        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
        int err = -EIO;
        unsigned long pfn;
        unsigned long total = 0, mem = 0;
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (pfn_valid(pfn)) {
                        online_page(pfn_to_page(pfn));
                        err = 0;
                        mem++;
                }
                total++;
        }
        if (!err) {
                z->spanned_pages += total;
                z->present_pages += mem;
                z->zone_pgdat->node_spanned_pages += total;
                z->zone_pgdat->node_present_pages += mem;
        }
        return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

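/*
 * Late memory setup: hand all bootmem pages over to the buddy allocator,
 * account for reserved pages and register the /proc/kcore segments.
 */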
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

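/*
 * Return a range of init pages to the buddy allocator, poisoning them
 * first so that late references to freed init code or data stand out.
 */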
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
}

void free_initmem(void)
{
        memset(__initdata_begin, POISON_FREE_INITDATA,
                __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)__start_rodata;

        for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
                        (__end_rodata - __start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}

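/*
 * Report whether addr is covered by a present kernel page-table mapping,
 * walking every level by hand and honouring 2MB large pages.
 */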
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

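/* Expose the exception-trace flag under /proc/sys/debug. */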
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}