/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

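/*
 * Populate the image's page array from its kernel-resident data and patch
 * in the instruction alternatives selected for this CPU.
 */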
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

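/*
 * Boot-time initialization of the images a 64-bit kernel can map: the
 * native 64-bit vDSO and, when the x32 ABI is enabled, the x32 vDSO.
 */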
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits.

   Only used for the 64-bit and x32 vdsos. */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

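/*
 * Map a vDSO image into the current process: the text mapping first,
 * followed by the "[vvar]" area, into which the vvar page and (optionally)
 * the HPET registers are remapped at the offsets recorded in the image.
 */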
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       addr,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			addr + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
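/*
 * Map the selected 32-bit vDSO and, if the image provides it, record the
 * sysenter return address in the current thread info.
 */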
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
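/* Called by the ELF loader at exec time to map the vDSO for a 64-bit task. */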
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
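/* Exec-time hook for compat tasks: map the x32 vDSO or fall back to 32-bit. */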
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
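/* "vdso=" on the kernel command line controls vdso64_enabled (0 disables the mapping). */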
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif