/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}
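
/*
 * Note that apply_alternatives() patches the vdso image in place at
 * boot, so every process that maps it afterwards sees text already
 * specialized for the running CPU; no per-process fixups are needed.
 */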

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack
 * top.  This doesn't give many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}
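
/*
 * Example with made-up numbers: if the randomized stack top is near
 * 0x7ffd12345678 and len is three pages, start is first page-aligned
 * upward, end is rounded up to the next 2MB (PMD) boundary so the vdso
 * can share a page-table page with the stack, and one page-aligned slot
 * in [start, end] is chosen at random.  Since a PMD covers 512 pages,
 * this yields at most ~9 bits of entropy.
 */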

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
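
/*
 * The mremap hook lets tools that relocate a task's mappings wholesale
 * (checkpoint/restore, e.g. CRIU) mremap() the vdso: context.vdso is
 * updated to the new address, and vdso_fix_landing() repoints a stopped
 * 32-bit syscall return into the moved image.
 */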

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
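
/*
 * Both fault handlers above populate their mappings lazily instead of
 * from pre-built page arrays, so a single vdso_image can back every mm
 * in the system.  vm_insert_pfn() returning -EBUSY means another thread
 * raced us and already installed the PTE, which is why it is treated
 * the same as success in vvar_fault().
 */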

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;

	static const struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
		.fault = vdso_fault,
		.mremap = vdso_mremap,
	};
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;
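
	/*
	 * sym_vvar_start is negative, so this reservation is laid out,
	 * low to high, as:
	 *
	 *	addr:		vvar page(s) (and pvclock page, if any)
	 *	text_start:	vdso text, image->size bytes
	 *
	 * i.e. the vvar area sits just below the vdso text, and
	 * context.vdso points at the text, not at the reservation base.
	 */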

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
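
/*
 * E.g. booting with "vdso=0" clears vdso64_enabled, so map_vdso() is
 * never called for 64-bit tasks and the C library falls back to real
 * syscalls for gettimeofday() and friends.
 */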
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store the CPU number in the segment limit so that it can be
	 * loaded quickly in user space in vgetcpu (12 bits for the CPU
	 * and 8 bits for the node).
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
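
/*
 * User space can then recover this information either via RDTSCP
 * (reading IA32_TSC_AUX as written above) or, roughly as the vdso's
 * __vdso_getcpu() does, with an unprivileged LSL on that segment:
 *
 *	unsigned int p;
 *	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;	// low 12 bits
 *	node = p >> 12;		// next 8 bits
 */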

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */