/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

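/*
 * Populate the array of struct pages backing the vDSO text so it can
 * be mapped into processes, then apply any alternative-instruction
 * patches for the running CPU.
 */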
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized
   offset.  This way there is no hole in the middle of the address space.
   To save memory, make sure it is still in the same page table as the
   stack top.  This doesn't give many random bits.

   Only used for the 64-bit and x32 vdsos. */
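/*
 * Example: on x86-64 with 4K pages, PTRS_PER_PTE is 512, so the offset
 * below spans at most 512 pages (2MB, one PMD) above the stack top,
 * i.e. at most 9 bits of randomness.
 */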
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't wrongly
	 * round it up to the next page.  addr can arrive here 4K-unaligned
	 * as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

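/*
 * Map a vDSO image into the current process.  The vvar area sits
 * directly below the vDSO text (image->sym_vvar_start is negative), so
 * one contiguous region of image->size - image->sym_vvar_start bytes is
 * reserved and then split into the two special mappings.
 */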
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
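	/*
	 * The vvar mapping itself has no backing pages; the vvar (and
	 * HPET) pages are remapped into it further down.
	 */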
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

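	/*
	 * -image->sym_vvar_start is the (positive) size of the vvar area
	 * that precedes the vDSO text.
	 */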
	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

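	/*
	 * Expose the kernel's shared vvar page read-only at the slot the
	 * vDSO expects, if this image has one.
	 */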
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

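	/*
	 * Likewise map the HPET registers read-only and uncached so the
	 * vDSO can read the hardware counter without a syscall.
	 */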
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 text_start + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
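/*
 * Map the 32-bit vDSO (vdso32_enabled and selected_vdso32 are set up
 * elsewhere) and record the userspace sysenter return address used by
 * the sysenter fast path.
 */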
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
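/*
 * Called from the ELF loader at exec time to map the vDSO into the new
 * process image.
 */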
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
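/* "vdso=0" on the kernel command line turns the 64-bit vDSO off. */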
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif