/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

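/* Checked in setup_additional_pages(); cleared by the "vdso=0" boot parameter. */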
unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;

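/*
 * Find the .altinstructions section inside the x32 vDSO image and run
 * apply_alternatives() on it, so the image gets the same CPU-specific
 * instruction patching as the rest of the kernel.
 */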
static void __init patch_vdsox32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdsox32: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

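/*
 * Walk the 64-bit vDSO image's Elf64 section headers and apply
 * alternatives patching to its .altinstructions section.
 */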
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso64: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

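/*
 * One-time boot initialization: patch the vDSO image(s) in place, then
 * build the struct page arrays that setup_additional_pages() later maps
 * into each new process.
 */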
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vDSO above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
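	/*
	 * Round the stack top up to the next PMD boundary, clamp to the
	 * end of the user address space, and back off by len so that
	 * the mapping always fits below TASK_SIZE_MAX.
	 */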
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses a few more bits than a modulo would, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't
	 * align it wrongly again to the next page.  addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_addr(addr, NULL, ALIGN_VDSO);

	return addr;
}

/*
 * Set up a VMA at program startup for the vDSO pages.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

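	/*
	 * Ask for a randomized address near the stack top; if that spot
	 * is unavailable, get_unmapped_area() falls back to searching
	 * for a free range on its own.
	 */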
	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

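/* Parse the "vdso=" boot parameter; "vdso=0" disables the mapping. */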
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);