/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

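/*
 * Called once at boot per image: patch in instruction alternatives so
 * that every process maps an already-patched blob.
 */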
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

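/*
 * Fault handler for the vdso text mapping: back the faulting offset
 * with the corresponding page of the image blob.
 */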
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

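/*
 * If the task is sitting on the 32-bit vdso's int80 landing pad while
 * the vdso is moved (e.g. mremap() by CRIU), retarget the saved IP to
 * the same symbol inside the new mapping.
 */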
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

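/*
 * mremap() callback: the vdso may move, but only wholesale and without
 * resizing; record its new location in the mm.
 */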
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

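/*
 * Fault handler for the vvar area. These pages sit below the vdso
 * text, so image->sym_vvar_start (and the per-page symbols checked
 * below) are negative offsets relative to the start of the text.
 */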
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping. This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero means map at any free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

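	/*
	 * 'addr' is the base of the combined vvar+text area; the vvar
	 * pages come first, so the text begins -sym_vvar_start bytes in.
	 */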
	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory, make sure it is still covered by the same
 * page-table page (PMD) as the stack top. This doesn't give that
 * many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

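	/*
	 * Choose a page-aligned start in [start, end]; the "+ 1" makes
	 * 'end' itself a possible outcome.
	 */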
	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if the vdso blob is already mapped and fail if so, to
	 * keep userspace from abusing install_special_mapping(), which
	 * may not get accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a slow
	 * path, so explicitly check all of them to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

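/*
 * Compat tasks: x32 processes get the (randomized) x32 vdso, ia32
 * processes the 32-bit one.
 */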
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
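/* Parse the "vdso=" boot parameter: vdso=0 disables the 64-bit vdso. */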
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
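	/*
	 * On RDTSCP-capable CPUs, also publish (node << 12) | cpu in
	 * MSR_TSC_AUX so that user space can fetch it with one RDTSCP.
	 */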
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store the CPU number in the segment limit so that user space
	 * can load it quickly in vgetcpu. (12 bits for the CPU and 8
	 * bits for the node.)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */