/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

extern char _stext[];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

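/*
 * Dump a summary of memory state to the console: the free areas, free
 * swap, and per-node counts of total/reserved/swap-cached/shared pages.
 */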
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end[];
extern char __init_begin, __init_end;

int after_bootmem;

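/*
 * Allocate a zeroed page for a new page table: from the bootmem
 * allocator while it is still up, then via get_zeroed_page(GFP_ATOMIC)
 * once mem_init() has set after_bootmem.
 */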
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

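/*
 * Establish a kernel mapping for a single page: point vaddr at phys
 * with protection prot, allocating any missing pud/pmd/pte levels on
 * the way. The pgd entry must already exist (set up in head.S).
 */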
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

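/*
 * Grab the next physical page at table_end for an early page table and
 * make it addressable through one of the temporary 2MB boot mappings,
 * since the direct mapping does not exist yet. Returns the virtual
 * address; the mapping slot goes to *index (for unmap_low_page()) and
 * the physical address to *phys.
 */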
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

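/* Undo a temporary mapping set up by alloc_low_page(). */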
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

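/*
 * Fill one pud's worth of the kernel direct mapping for the physical
 * range [address, end) using 2MB (PSE) pages, clearing entries past
 * the end and skipping ranges the e820 map does not mark usable.
 */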
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

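/*
 * Estimate the worst-case size of the direct-mapping page tables for
 * memory up to 'end' and find a free e820 range below the kernel text
 * to hold them; alloc_low_page() then hands out pages starting at
 * table_start, advancing table_end.
 */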
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this currently runs before the
	 * nodes are discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
		     table_start << PAGE_SHIFT,
		     table_end << PAGE_SHIFT);
}

extern struct x8664_pda cpu_pda[NR_CPUS];

/* Assumes all CPUs still execute in init_mm */
void zap_low_mappings(void)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	pgd_clear(pgd);
	flush_tlb_all();
}

#ifndef CONFIG_NUMA
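/*
 * Flat (non-NUMA) zone setup: ZONE_DMA below MAX_DMA_ADDRESS and
 * ZONE_NORMAL above it, with e820 holes subtracted, all on node 0.
 */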
void __init paging_init(void)
{
	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long holes[MAX_NR_ZONES];
		unsigned int max_dma;

		memset(zones_size, 0, sizeof(zones_size));
		memset(holes, 0, sizeof(holes));

		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

		if (end_pfn < max_dma) {
			zones_size[ZONE_DMA] = end_pfn;
			holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
		} else {
			zones_size[ZONE_DMA] = max_dma;
			holes[ZONE_DMA] = e820_hole_size(0, max_dma);
			zones_size[ZONE_NORMAL] = end_pfn - max_dma;
			holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
		}
		free_area_init_node(0, NODE_DATA(0), zones_size,
				    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
	}
	return;
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid
   prefetches from the CPU leading to inconsistent cache lines.
   address and size must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

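/* Return 1 if pfn 'pagenr' lies in a region the e820 map reports as usable RAM. */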
static inline int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

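/*
 * Final memory setup: bring up the swiotlb when there is no IOMMU
 * aperture and memory extends past 4GB (or an IOMMU is forced), release
 * all bootmem pages to the page allocator, count reserved pages,
 * register the /proc/kcore regions, and print the memory banner.
 */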
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages += numa_free_all_bootmem();
	tmp = 0;
	/* should count reserved pages here for all nodes */
#else

#ifdef CONFIG_FLATMEM
	max_mapnr = end_pfn;
	if (!mem_map)
		BUG();
#endif

	totalram_pages += free_all_bootmem();

	for (tmp = 0; tmp < end_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;
#endif

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       end_pfn << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

extern char __initdata_begin[], __initdata_end[];

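/*
 * Free the pages holding __init code and data. The text is filled with
 * 0xcc (int3) and __initdata with 0xba first, so any stray late
 * reference traps or shows up as obvious garbage.
 */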
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
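/*
 * Hand the initrd pages back to the page allocator once the initrd is
 * no longer needed; a range overlapping the kernel image is left alone.
 */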
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}

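/*
 * Return whether addr is a canonical kernel address backed by a present
 * mapping at every page-table level; 2MB large pages are handled at the
 * pmd level.
 */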
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

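/* Debugging knobs exported under /proc/sys/debug. */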
extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#ifdef CONFIG_CHECKING
	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#endif
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

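/* Return whether addr falls inside the task's vsyscall gate VMA. */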
int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}