/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

#if defined(CONFIG_X86_64)
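/* Controlled by the "vdso=" boot parameter; parsed in vdso_setup() below. */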
unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;
#endif
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
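/*
 * Walk the section headers of the vDSO image embedded in the kernel,
 * find ".altinstructions", and run alternatives patching over it so
 * the vDSO uses the best instruction sequences for the boot CPU.
 */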
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso32: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

#if defined(CONFIG_X86_64)
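/* Same as patch_vdso32() above, but for the 64-bit ELF image. */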
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso64: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

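/*
 * Runs once at boot (subsys_initcall): patch the embedded vDSO images
 * and build the struct page arrays handed to install_special_mapping()
 * at exec time.
 */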
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE as
 * the stack top.  This doesn't give that many random bits.
 */
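/*
 * Worked example: with 4 KiB pages and PTRS_PER_PTE == 512, "offset"
 * below is a random page index in [0, 512), so the vDSO is placed up
 * to 2 MiB - 4 KiB above the stack top (at most 9 bits of entropy),
 * clamped so it stays below the end of the PMD containing the stack.
 */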
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
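	/* Clamp rather than wrap: cheaper, at the cost of biasing toward "end". */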
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

/*
 * Set up a VMA at program startup for the vDSO.  Not called for
 * compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
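	/*
	 * Pick a hint address above the stack top, then let
	 * get_unmapped_area() place the mapping, preferring that hint.
	 */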
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

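	/*
	 * Map the pages read/exec only.  VM_MAYWRITE is not a write
	 * permission: it is what lets a debugger plant breakpoints in
	 * the vDSO through ptrace.
	 */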
	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

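/* Called from the ELF loader for native 64-bit binaries. */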
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
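/* x32 binaries get the x32 vDSO image instead. */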
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

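/* "vdso=0" disables the vDSO; any nonzero value enables it (default 1). */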
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif