/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

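/*
 * Fill in the image's page array from its static data and patch in any
 * alternative instructions before the image is ever mapped.
 */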
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

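/* Prepare the 64-bit (and, when built, x32) vdso images once at boot. */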
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack top.
 * This doesn't give many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area() doesn't align it
	 * to the next page again.  addr can come in 4K-unaligned here as a
	 * result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

/*
 * Set up a VMA at program startup for the vdso pages.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso64_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

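/*
 * Arch hook called by the ELF loader for native 64-bit binaries; maps the
 * 64-bit vdso image into the new process.
 */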
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_image_64.pages,
				      vdso_image_64.size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_image_x32.pages,
				      vdso_image_x32.size);
}
#endif

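/*
 * "vdso=" kernel parameter: "vdso=0" disables mapping the 64-bit vdso,
 * any non-zero value keeps it enabled (the default).
 */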
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif