/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

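/*
 * Populate the image's page array and patch in any alternative
 * instructions before the image is ever mapped into a process.
 */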
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
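/* Boot-time initialization of the 64-bit (and, if configured, x32) images. */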
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
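	/*
	 * PTRS_PER_PTE page-sized slots span exactly one PMD (2 MB), so
	 * the result stays in the same page table as the stack top.
	 */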
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

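/*
 * Map a vDSO image into the current process: the image itself at
 * [addr, addr + image->size), followed by a read-only VMA holding the
 * vvar (and, if present, HPET) pages, up to image->sym_end_mapping.
 */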
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
				      addr,
				      image->size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      image->pages);

	if (ret)
		goto up_fail;

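	/*
	 * The tail of the mapping, past the image itself, is a read-only
	 * area that the vvar and HPET remappings below fill in.
	 */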
	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       NULL);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

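	/* Expose the kernel's shared vvar data page, read-only. */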
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

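	/* If an HPET is present, also map its MMIO page, uncached. */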
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 addr + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
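/*
 * Map the 32-bit vDSO, used both natively and for compat processes,
 * and record the sysenter return address if the image provides one.
 */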
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
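/* Called from the ELF loader at exec time to map the vDSO for a new process. */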
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
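/* "vdso=0" on the kernel command line disables the 64-bit vDSO. */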
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif