/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
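
/*
 * Illustrative use only (not taken from this file): wiring the local APIC
 * registers into a fixed virtual slot at boot looks roughly like
 *
 *	__set_fixmap(FIX_APIC_BASE, mp_lapic_addr, PAGE_KERNEL_NOCACHE);
 *
 * where FIX_APIC_BASE is one of the enum fixed_addresses slots from
 * asm/fixmap.h; the exact slot and protection here are assumptions.
 */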

unsigned long __initdata table_start, table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}
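
/*
 * Typical pairing, as phys_pud_init() below does it: grab a zeroed page
 * together with its physical address, wire the physical address into the
 * parent table, then drop the temporary mapping again:
 *
 *	unsigned long pmd_phys;
 *	pmd_t *pmd = alloc_low_page(&pmd_phys);
 *	set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
 *	...
 *	unmap_low_page(pmd);
 */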

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}
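
/*
 * The pmds computation above rounds the span [addr, addr+size) up to
 * whole 2MB PMD entries (~PMD_MASK == PMD_SIZE - 1). Worked example:
 * addr = 0xfee00800 and size = 0x1000 give
 * (0x800 + 0x1000 + 0x1fffff) / 0x200000 == 1, i.e. a single PMD.
 */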

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}

static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
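
/*
 * Each entry written above is a 2MB large-page mapping. For address
 * 0x40000000 (the 1GB mark), for example, the raw entry is
 * _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL or'ed with 0x40000000;
 * masking with __supported_pte_mask then strips _PAGE_NX again on CPUs
 * without NX support.
 */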

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr + PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 4KB per GB mapped. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
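
/*
 * Rough sizing sketch (numbers illustrative): for end = 4GB this gives
 * puds = 4 and pmds = 2048, so tables = round_up(4 * 8, 4096) +
 * round_up(2048 * 8, 4096) = 5 pages (20KB) of early page-table space,
 * matching the ~4KB of PMD entries per GB noted above.
 */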

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory; to access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is currently done before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
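
/*
 * Boot-time call sketch (hedged; the actual call site lives in
 * arch/x86_64/kernel/setup.c):
 *
 *	init_memory_mapping(0, end_pfn_map << PAGE_SHIFT);
 *
 * This builds the 1:1 mapping of all discovered physical memory at
 * PAGE_OFFSET out of 2MB pages, one PGD entry (PGDIR_SIZE) per loop pass.
 */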

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
	       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
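
/*
 * Illustrative caller (hedged, modeled on the GART IOMMU code): punch
 * the graphics aperture out of the kernel mapping so speculative CPU
 * prefetches can never touch it:
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
 *
 * Both base and size must be 2MB-aligned, as the BUG_ONs above enforce.
 */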

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size - 1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
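
/*
 * Hot-add sketch (values illustrative): for a 1GB DIMM appearing on
 * node 0 at the 8GB physical mark, the memory-hotplug core would call
 *
 *	arch_add_memory(0, 0x200000000ULL, 0x40000000ULL);
 *
 * which first extends the direct mapping and then registers the new pfn
 * range with ZONE_NORMAL through __add_pages().
 */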

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance; just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}
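
/*
 * Example of the resulting boot line (numbers purely illustrative):
 *
 *	Memory: 4043524k/4194304k available (3465k kernel code,
 *	150780k reserved, 1586k data, 392k init)
 */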

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
		ClearPageReserved(page);
		init_page_count(page);
		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		__free_page(page);
		totalram_pages++;
	}
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
#endif
	size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
	change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", __pa(start), __pa(end));
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys + len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
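
/*
 * Typical use (e.g. by /proc/kcore): validate a kernel virtual address
 * by walking the page tables before dereferencing it:
 *
 *	if (kern_addr_valid(addr))
 *		val = *(unsigned long *)addr;
 */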

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.ctl_name	= 99,
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name	= CTL_DEBUG,
		.procname	= "debug",
		.mode		= 0555,
		.child		= debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
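
/*
 * The table registered above appears as /proc/sys/debug/exception-trace;
 * for example, "echo 0 > /proc/sys/debug/exception-trace" (illustrative)
 * disables the exception traces at runtime.
 */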

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64-bit vsyscall page now; 32-bit has a real VMA and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}