/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

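/*
 * Allocate a zeroed page for early page-table use: from the bootmem
 * allocator before mem_init() has run, from the page allocator afterwards
 * (after_bootmem selects which).
 */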
static void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

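/*
 * Install a single kernel PTE mapping vaddr -> phys with the given
 * protection, allocating any missing intermediate page-table levels
 * via spp_getpage(). Used by __set_fixmap() below.
 */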
static void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

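/*
 * Early page-table allocation state: table_start/table_end are pfns into
 * the physical range reserved by find_early_table_space(), and
 * temp_mappings provides two temporary 2MB windows (backed by the
 * externally defined temp_boot_pmds) for writing those pages before the
 * direct mapping exists.
 */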
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

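/*
 * Hand out the next early page-table page (table_end++), map it through a
 * free temp_mappings slot and return its virtual address; the slot index
 * and the page's physical address are returned via *index and *phys.
 */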
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __init void unmap_low_page(int i)
{
        struct temp_map *ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

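/*
 * Populate one PUD's worth of the direct mapping with 2MB (PSE) PMD
 * entries, clearing entries past 'end' and skipping PUD-sized ranges that
 * do not appear in the e820 map.
 */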
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end) {
                        for (; i < PTRS_PER_PUD; i++, pud++)
                                set_pud(pud, __pud(0));
                        break;
                }

                if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end) {
                                for (; j < PTRS_PER_PMD; j++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                                break;
                        }
                        pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
                unmap_low_page(map);
        }
        __flush_tlb();
}

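/*
 * Estimate how much memory the direct-mapping page tables will need for
 * memory up to 'end' and reserve a free e820 area below the kernel text
 * (starting the search at 0x8000) to hold them.
 */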
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pud_t *pud = alloc_low_page(&map, &pud_phys);
                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
        early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
                     table_start<<PAGE_SHIFT,
                     table_end<<PAGE_SHIFT);
}

extern struct x8664_pda cpu_pda[NR_CPUS];

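/*
 * Drop the low identity mapping left over from early boot: on the boot CPU
 * clear the first kernel PGD entry; on secondary CPUs just reload cr3 with
 * init_level4_pgt, which does not contain it.
 */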
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too. */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}

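/* Non-NUMA setup: a single node 0 covering all of memory. */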
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

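/*
 * Final memory setup: release bootmem to the page allocator, account
 * reserved pages, register the /proc/kcore regions and print the memory
 * summary.
 */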
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
        if (!iommu_aperture &&
            (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
                swiotlb = 1;
        if (swiotlb)
                swiotlb_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

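/*
 * Free the pages holding __init code and data, poisoning them with 0xcc
 * (int3) first so that stale references into the area are easier to catch;
 * __initdata is separately poisoned with 0xba.
 */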
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < (unsigned long)&_end)
                return;
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

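/*
 * NUMA-aware wrapper around reserve_bootmem(); also tracks how much of the
 * reservation falls below MAX_DMA_PFN so size_zones() can account for it
 * via dma_reserve.
 */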
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}

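/*
 * Check whether a kernel virtual address is canonical and backed by a
 * valid page, walking the page tables by hand and handling 2MB (PSE)
 * mappings.
 */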
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
#ifdef CONFIG_CHECKING
        { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
#endif
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628}