/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

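/*
 * Finish boot-time setup of a vdso image: check that the image is a
 * whole number of pages and patch its alternative-instruction sites,
 * just like the kernel's own text.
 */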
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

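/*
 * Page-fault handler for the vdso text mapping ("[vdso]"): return the
 * page of image data that backs the faulting offset.
 */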
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

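/*
 * Page-fault handler for the vvar mapping ("[vvar]"): insert the shared
 * kernel data page (or the pvclock page, when that clock source is in
 * use) that the vdso expects to find at the faulting offset.
 */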
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping. This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

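/*
 * Map the vdso and vvar areas into the current process. A single
 * unmapped-area search covers both: the vvar pages sit at negative
 * offsets (image->sym_vvar_start) below the vdso text, so the text
 * itself starts at addr - image->sym_vvar_start.
 */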
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
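/*
 * "vdso=" on the kernel command line controls whether the 64-bit vdso
 * is mapped into new processes: "vdso=0" disables it.
 */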
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
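/*
 * Load the CPU and node numbers where the vdso's vgetcpu code can read
 * them from user space: TSC_AUX (for RDTSCP) when the CPU supports it,
 * and the limit field of a per-cpu GDT segment in either case.
 */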
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

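/*
 * Redo the per-cpu vgetcpu setup whenever a CPU comes (back) online.
 */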
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

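/*
 * Boot-time initialization: patch the vdso images and set up the
 * per-cpu vgetcpu state on all CPUs, keeping it current across CPU
 * hotplug via the notifier above.
 */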
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */