/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

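/*
 * Allocate a zeroed page for use as a page table. Early in boot this
 * comes from the bootmem allocator; once after_bootmem is set, pages
 * come from the buddy allocator (GFP_ATOMIC, as callers may not sleep).
 */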
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

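/*
 * Map a single page of kernel virtual space: walk pgd -> pud -> pmd -> pte
 * for vaddr, allocating intermediate tables via spp_getpage() as needed,
 * and point the final pte at phys. The pgd slot itself must already have
 * been set up (by head.S for the fixmap region).
 */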
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

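/*
 * Early page-table allocator. The direct mapping of physical memory does
 * not exist yet at this point, so each page taken from
 * [table_start, table_end) is reached through one of the temporary 2MB
 * mappings (temp_boot_pmds) at the 40MB/42MB virtual addresses above.
 */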
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

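/*
 * Fill one pud's worth of the direct mapping with 2MB (_PAGE_PSE) pmd
 * entries, from address up to end. Ranges the e820 map does not cover
 * are left unmapped.
 */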
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

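/*
 * Estimate the worst-case size of the pud/pmd tables needed to map
 * memory up to end, and reserve a free e820 area for them, preferably
 * above the DMA zones.
 */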
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* Put page tables beyond the DMA zones if possible.
	   RED-PEN might be better to spread them out more over
	   memory to avoid hotspots */
	if (end > MAX_DMA32_PFN<<PAGE_SHIFT)
		start = MAX_DMA32_PFN << PAGE_SHIFT;
	else if (end > MAX_DMA_PFN << PAGE_SHIFT)
		start = MAX_DMA_PFN << PAGE_SHIFT;
	else
		start = 0x8000;

	table_start = find_e820_area(start, end, tables);
	if (table_start == -1)
		table_start = find_e820_area(0x8000, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT,
		     table_end<<PAGE_SHIFT);
}

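/*
 * Drop the low identity mapping left over from early boot. The boot CPU
 * clears the first kernel pgd entry in place; APs just switch cr3 to
 * init_level4_pgt, where the low mappings are already gone.
 */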
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing a local TLB flush.
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

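/*
 * Late memory init: set up the DMA mapping implementation (swiotlb or
 * nommu), hand all bootmem pages over to the buddy allocator, account
 * reserved pages, and register the /proc/kcore memory areas.
 */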
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
	no_iommu_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt,
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

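/*
 * Free the pages holding __init code and data, poisoning them with 0xcc
 * first so that a stale jump into freed init code hits an int3 trap
 * rather than executing garbage.
 */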
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

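/*
 * Reserve an early physical range with the bootmem allocator (on the
 * owning node when NUMA is enabled). Reservations below 16MB are also
 * counted in dma_reserve so size_zones() can account for them.
 */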
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

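/*
 * Return whether a kernel virtual address is backed by a present
 * mapping with a valid pfn, taking 2MB large pages into account.
 */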
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}