/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

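/*
 * Populate the vDSO image's page array and apply instruction
 * alternatives to its text, once at boot per image.
 */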
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE as
 * the top of the stack.  This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't wrongly
	 * align it again to the next page.  addr can come in 4K-unaligned
	 * here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

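/*
 * Map the vDSO text and its vvar area into the current process.
 * sym_vvar_start is negative here (the vvar VMA below is sized as
 * -image->sym_vvar_start), so the vvar pages sit immediately below
 * the vdso text and a single get_unmapped_area() call reserves both.
 */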
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};
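	/*
	 * vvar_mapping deliberately has no backing pages; the vvar and
	 * HPET pages are wired up further down with remap_pfn_range()
	 * and io_remap_pfn_range() instead.
	 */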

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
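	/*
	 * context.vdso now records the text address; the ELF loader
	 * hands it to user space as the AT_SYSINFO_EHDR auxv entry.
	 */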

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

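	/*
	 * Expose the kernel's read-only vvar data page at the image's
	 * vvar_page symbol, if the image provides one.
	 */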
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 text_start + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
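/*
 * "vdso=" boot parameter: vdso=0 disables the 64-bit vDSO,
 * any other value (the default is 1) leaves it enabled.
 */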
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void vsyscall_set_cpu(int cpu)
{
	struct desc_struct d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
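	/*
	 * Pack (node << 12) | cpu into MSR_TSC_AUX so that user space
	 * can recover both with a single rdtscp instruction.
	 */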
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d = (struct desc_struct) {
		.limit0 = cpu | ((node & 0xf) << 12),
		.limit = node >> 4,
		.type = 5,		/* RO data, expand down, accessed */
		.dpl = 3,		/* Visible to user code */
		.s = 1,			/* Not a system segment */
		.p = 1,			/* Present */
	};

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static void cpu_vsyscall_init(void *arg)
{
	/* preemption should already be off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

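/*
 * Initialize every CPU that is already online, then register a hotplug
 * notifier so CPUs onlined later also get their per-cpu GDT entry.
 */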
static int __init vsyscall_init(void)
{
	cpu_notifier_register_begin();

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(cpu_vsyscall_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
__initcall(vsyscall_init);
#endif