/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>

#include "vextern.h"		/* Just for VMAGIC. */
#undef VEXTERN

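/* Can be cleared with the "vdso=" boot parameter; see vdso_setup() below. */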
unsigned int __read_mostly vdso_enabled = 1;

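/* Bounds of the raw vDSO image; the image itself is embedded into the
   kernel by the vDSO build (vdso.S in this tree). */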
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;

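/*
 * Check that a pointer slot in the copied vDSO image still holds the
 * VMAGIC placeholder before it gets patched; anything else means the
 * image and its symbol offsets disagree, so disable the vDSO.
 */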
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk("VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}

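/*
 * Copy the vDSO image into freshly allocated pages and patch its VMAGIC
 * placeholders to point at the real kernel variables.  The pages are
 * mapped into each new process by arch_setup_additional_pages().
 */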
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

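	/*
	 * Patch every pointer listed in vextern.h.  Illustrative expansion,
	 * assuming vextern.h contains VEXTERN(vgetcpu_mode):
	 *
	 *	*(typeof(__vgetcpu_mode) **)
	 *		var_ref(VDSO64_SYMBOL(vbase, vgetcpu_mode),
	 *			"vgetcpu_mode") = &__vgetcpu_mode;
	 */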
#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	vunmap(vbase);
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
subsys_initcall(init_vdso_vars);

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits */
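/* With 4k pages and a 2MB PMD, PTRS_PER_PTE is 512, so the masked offset
   below yields at most 9 bits of randomness, all within the 2MB region
   containing the stack top. */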
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

/* Set up a VMA at program startup for the vDSO pages.
   Not called for compat tasks. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

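	/* Reached on success too: "up_fail" just drops mmap_sem. */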
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

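/* "vdso=0" on the kernel command line disables the vDSO for new processes. */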
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);