blob: 3fdd51497a838b801cc69dbd8408ea849da75546 [file] [log] [blame]
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
6#include <linux/mm.h>
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04007#include <linux/err.h>
Andi Kleen2aae9502007-07-21 17:10:01 +02008#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/random.h>
11#include <asm/vsyscall.h>
12#include <asm/vgtod.h>
13#include <asm/proto.h>
Roland McGrath7f3646a2008-01-30 13:30:41 +010014#include <asm/vdso.h>
15
16#include "vextern.h" /* Just for VMAGIC. */
17#undef VEXTERN
Andi Kleen2aae9502007-07-21 17:10:01 +020018
19int vdso_enabled = 1;
20
Roland McGrath7f3646a2008-01-30 13:30:41 +010021extern char vdso_start[], vdso_end[];
Andi Kleen2aae9502007-07-21 17:10:01 +020022extern unsigned short vdso_sync_cpuid;
23
24struct page **vdso_pages;
25
Roland McGrath7f3646a2008-01-30 13:30:41 +010026static inline void *var_ref(void *p, char *name)
Andi Kleen2aae9502007-07-21 17:10:01 +020027{
Andi Kleen2aae9502007-07-21 17:10:01 +020028 if (*(void **)p != (void *)VMAGIC) {
29 printk("VDSO: variable %s broken\n", name);
30 vdso_enabled = 0;
31 }
32 return p;
33}
34
35static int __init init_vdso_vars(void)
36{
37 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
38 int i;
39 char *vbase;
40
41 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
42 if (!vdso_pages)
43 goto oom;
44 for (i = 0; i < npages; i++) {
45 struct page *p;
46 p = alloc_page(GFP_KERNEL);
47 if (!p)
48 goto oom;
49 vdso_pages[i] = p;
50 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
51 }
52
53 vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
54 if (!vbase)
55 goto oom;
56
57 if (memcmp(vbase, "\177ELF", 4)) {
58 printk("VDSO: I'm broken; not ELF\n");
59 vdso_enabled = 0;
60 }
61
Andi Kleen2aae9502007-07-21 17:10:01 +020062#define VEXTERN(x) \
Roland McGrath7f3646a2008-01-30 13:30:41 +010063 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
Andi Kleen2aae9502007-07-21 17:10:01 +020064#include "vextern.h"
65#undef VEXTERN
66 return 0;
67
68 oom:
69 printk("Cannot allocate vdso\n");
70 vdso_enabled = 0;
71 return -ENOMEM;
72}
73__initcall(init_vdso_vars);
74
75struct linux_binprm;
76
77/* Put the vdso above the (randomized) stack with another randomized offset.
78 This way there is no hole in the middle of address space.
79 To save memory make sure it is still in the same PTE as the stack top.
80 This doesn't give that many random bits */
81static unsigned long vdso_addr(unsigned long start, unsigned len)
82{
83 unsigned long addr, end;
84 unsigned offset;
85 end = (start + PMD_SIZE - 1) & PMD_MASK;
86 if (end >= TASK_SIZE64)
87 end = TASK_SIZE64;
88 end -= len;
89 /* This loses some more bits than a modulo, but is cheaper */
90 offset = get_random_int() & (PTRS_PER_PTE - 1);
91 addr = start + (offset << PAGE_SHIFT);
92 if (addr >= end)
93 addr = end;
94 return addr;
95}
96
97/* Setup a VMA at program startup for the vsyscall page.
98 Not called for compat tasks */
99int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
100{
101 struct mm_struct *mm = current->mm;
102 unsigned long addr;
103 int ret;
104 unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
105
106 if (!vdso_enabled)
107 return 0;
108
109 down_write(&mm->mmap_sem);
110 addr = vdso_addr(mm->start_stack, len);
111 addr = get_unmapped_area(NULL, addr, len, 0, 0);
112 if (IS_ERR_VALUE(addr)) {
113 ret = addr;
114 goto up_fail;
115 }
116
117 ret = install_special_mapping(mm, addr, len,
118 VM_READ|VM_EXEC|
119 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
120 VM_ALWAYSDUMP,
121 vdso_pages);
122 if (ret)
123 goto up_fail;
124
125 current->mm->context.vdso = (void *)addr;
126up_fail:
127 up_write(&mm->mmap_sem);
128 return ret;
129}
130
131static __init int vdso_setup(char *s)
132{
133 vdso_enabled = simple_strtoul(s, NULL, 0);
134 return 0;
135}
136__setup("vdso=", vdso_setup);