/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%ld pages of RAM\n", total);
        printk(KERN_INFO "%ld reserved pages\n", reserved);
        printk(KERN_INFO "%ld pages shared\n", shared);
        printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

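/*
 * Install a single kernel mapping: point 'vaddr' at physical address
 * 'phys' with protection 'prot', allocating any missing intermediate
 * pud/pmd/pte levels from spp_getpage().  Used by __set_fixmap() below.
 */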
static __init void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

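/*
 * Illustrative use (not from this file): the local APIC registers are
 * reached through a fixmap slot established along these lines:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *	id = apic_read(APIC_ID);
 *
 * after which fix_to_virt(FIX_APIC_BASE) is a stable virtual address.
 */
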
unsigned long __initdata table_start, table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd, *last_pmd;
        int i, pmds;

        /* Number of 2MB entries needed to cover [addr, addr + size). */
        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto next;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
                __flush_tlb();
                return (void *)vaddr;
        next:
                ;
        }
        printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
        return NULL;
}

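/*
 * Sketch of the intended pairing (values here are illustrative only):
 *
 *	void *map = early_ioremap(0xfee00000, 0x1000);
 *	if (map) {
 *		... inspect the firmware/MMIO data at 'map' ...
 *		early_iounmap(map, 0x1000);
 *	}
 *
 * Both sides round to whole 2MB PSE entries in level2_kernel_pgt, so
 * the unmap must use the same address/size as the map.
 */
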
/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
        __flush_tlb();
}

static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long entry;
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}

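/*
 * Worked example for the entry built above, using the usual x86_64 bit
 * values and assuming the CPU supports NX (i.e. __supported_pte_mask
 * keeps _PAGE_NX): mapping physical address 0x40000000 produces
 *
 *	0x40000000 | _PAGE_NX | _PAGE_PSE | _PAGE_GLOBAL | _KERNPG_TABLE
 *	= 0x80000000400001e3
 *
 * i.e. a present, writable, global, non-executable 2MB kernel page.
 */
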
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}

static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(pmd);
        }
        __flush_tlb();
}

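/*
 * Size the initial kernel page tables needed to map [0, end) and find
 * a physically contiguous hole for them in the e820 map.  The search
 * starts at 0x8000 to stay clear of the lowest-memory users.
 */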
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 4KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                     end, table_start << PAGE_SHIFT,
                     (table_start << PAGE_SHIFT) + tables);
}

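/*
 * Back-of-the-envelope check (hypothetical machine with end = 4GB):
 *
 *	puds   = 4GB >> PUD_SHIFT (30) = 4 entries
 *	pmds   = 4GB >> PMD_SHIFT (21) = 2048 entries
 *	tables = round_up(4 * 8, 4K) + round_up(2048 * 8, 4K)
 *	       = 4K + 16K = 20K
 *
 * so even large machines only need a handful of pages here, about
 * 4-5KB per GB mapped.
 */
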
/* Set up the direct mapping of physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory; to access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is currently done before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid
   prefetches from the CPU leading to inconsistent cache lines.
   'address' and 'size' must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (!(pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

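/*
 * Illustrative caller (from the GART IOMMU setup, roughly): the GART
 * aperture must not stay reachable through the cached direct mapping,
 * so it is torn out with
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base),
 *			     iommu_size);
 *
 * leaving only the mapping the driver sets up itself.
 */
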
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size - 1);

        ret = __add_pages(zone, start_pfn, nr_pages);
        if (ret)
                goto error;

        return ret;
error:
        printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
        int err = -EIO;
        unsigned long pfn;
        unsigned long total = 0, mem = 0;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (pfn_valid(pfn)) {
                        online_page(pfn_to_page(pfn));
                        err = 0;
                        mem++;
                }
                total++;
        }
        if (!err) {
                z->spanned_pages += total;
                z->present_pages += mem;
                z->zone_pgdat->node_spanned_pages += total;
                z->zone_pgdat->node_present_pages += mem;
        }
        return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                absent_pages_in_range(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               end_pfn << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                ClearPageReserved(page);
                init_page_count(page);
                memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
}

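/*
 * free_init_pages() is shared: free_initmem() below feeds it the
 * __init sections, and free_initrd_mem() further down feeds it the
 * initrd range.
 */
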
void free_initmem(void)
{
        memset(__initdata_begin, POISON_FREE_INITDATA,
               __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
                        __pa_symbol(&__init_begin),
                        __pa_symbol(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
        unsigned long end = (unsigned long)__va(__pa_symbol(&__end_rodata));

        for (; addr < end; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
               (__end_rodata - __start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", __pa(start), __pa(end));
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= end_pfn) {
                /* This can happen with kdump kernels when accessing firmware
                   tables. */
                if (pfn < end_pfn_map)
                        return;
                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                       phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys + len <= MAX_DMA_PFN * PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}

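/*
 * Typical caller (illustrative): early setup code reserving firmware
 * regions it has discovered, e.g. the EBDA:
 *
 *	reserve_bootmem_generic(ebda_addr, ebda_size);
 *
 * The dma_reserve accounting above keeps the ZONE_DMA watermarks
 * honest when such reservations land below MAX_DMA_PFN.
 */
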
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

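/*
 * Illustrative caller: /proc/kcore validates kernel virtual addresses
 * with this before copying them out, along the lines of
 *
 *	if (kern_addr_valid(start))
 *		copy_to_user(buffer, (char *)start, tsz);
 *
 * so unmapped holes in the kernel address space are skipped rather
 * than faulted on.
 */
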
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        {
                .ctl_name       = 99,
                .procname       = "exception-trace",
                .data           = &exception_trace,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static ctl_table debug_root_table2[] = {
        {
                .ctl_name       = CTL_DEBUG,
                .procname       = "debug",
                .mode           = 0555,
                .child          = debug_table2
        },
        {}
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64-bit vsyscall page now. 32-bit has a real VMA now and
   does not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745}