/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

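/*
 * DECLARE_VDSO_IMAGE(x) (asm/vdso.h) declares the linker-provided
 * x_start[] and x_end[] markers for the embedded image, plus the
 * x_pages[] array that init_vdso() below fills in.
 */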
DECLARE_VDSO_IMAGE(vdso);
extern unsigned short vdso_sync_cpuid;
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
DECLARE_VDSO_IMAGE(vdsox32);
static unsigned vdsox32_size;
#endif
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
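/*
 * Patch a 32-bit vDSO image in place: locate its .altinstructions
 * section and hand it to apply_alternatives() so the image matches the
 * running CPU.  The section can be inspected in the built image with
 * e.g. "readelf -S" (the exact .so file name depends on tree and
 * config).
 */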
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso32: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

#if defined(CONFIG_X86_64)
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso64: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

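/*
 * One-time boot setup: patch the 64-bit (and, if configured, x32)
 * vDSO images for the running CPU and build the page arrays used when
 * mapping the vDSO into each new process.
 */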
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE page
 * (which covers 2MB of address space) as the stack top.  This doesn't
 * give that many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't wrongly
	 * align it again to the next page.  addr can come in 4K-unaligned
	 * here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}
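
/*
 * Worked example (x86-64 with 4K pages, so PAGE_SHIFT == 12 and
 * PTRS_PER_PTE == 512): get_random_int() & 511 picks a page offset of
 * 0..511, i.e. the vDSO lands somewhere within the 2MB span covered by
 * the stack top's PTE page -- roughly 9 bits of entropy on top of the
 * stack randomization itself.
 */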

/*
 * Set up a VMA at program startup for the vDSO.
 * Not called for compat (32-bit) tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso64_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
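
/*
 * The resulting mapping shows up in userspace as the "[vdso]" entry in
 * /proc/<pid>/maps, e.g.:
 *
 *   $ grep vdso /proc/self/maps
 *   7fff...-7fff... r-xp 00000000 00:00 0   [vdso]
 */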

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

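/*
 * "vdso=" kernel command line parameter: booting with "vdso=0" disables
 * mapping the 64-bit vDSO, so userspace falls back to ordinary syscalls;
 * "vdso=1" (the default) enables it.
 */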
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif