/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

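/*
 * Populate the image's page array from its static data and patch in any
 * alternative instructions for the running CPU.
 */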
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
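/* Set up the 64-bit (and, if configured, x32) vDSO images at boot. */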
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits.

   Only used for the 64-bit and x32 vdsos. */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
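	/* PTRS_PER_PTE is 512 on x86-64, so this adds at most 9 bits of
	   entropy, all below the next 2 MB (PMD) boundary above the stack
	   top. */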
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

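/*
 * Map a vDSO image into the current process: the image's text pages at
 * addr, followed by a VM_READ area (up to sym_end_mapping) that holds
 * the vvar page and, when available, the HPET MMIO page.
 */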
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	static struct page *no_pages[] = {NULL};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
				      addr,
				      image->size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      image->pages);

	if (ret)
		goto up_fail;

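	/*
	 * Reserve the remainder of the mapping (backed by no_pages, i.e.
	 * nothing yet) for the vvar and HPET pages remapped below.
	 */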
	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       no_pages);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

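	/* Map the shared vvar data page read-only at its image-defined
	   offset. */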
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
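	/* Expose the HPET registers uncached and read-only so the vDSO
	   clock code can read the counter without a syscall. */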
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 addr + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
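/*
 * Map the 32-bit vDSO selected at boot (if enabled) and record the
 * sysenter return address for the syscall fast path.
 */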
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
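/* Called from the ELF loader at exec time to map the vDSO into the new mm. */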
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
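/* x32 tasks get the x32 image at a randomized address; other compat
   (32-bit) tasks fall back to the selected 32-bit vDSO. */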
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
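/* Parse the "vdso=" boot parameter: "vdso=0" disables the 64-bit vDSO. */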
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif