/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

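/*
 * Patch the vDSO image in place at boot.  The image carries its own
 * alternatives section (recorded by the vdso2c build step as image->alt
 * and image->alt_len), and apply_alternatives() rewrites those
 * instruction sequences for the running CPU before the image is ever
 * mapped into a process.
 */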
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a
	 * result of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

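/*
 * The vDSO text is faulted in lazily from the image data rather than
 * being backed by a fixed page array: context.vdso_image records which
 * image this mm mapped, so the fault handler can serve pages from the
 * right blob (64-bit, x32 or 32-bit) per process.
 */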
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};
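	/*
	 * The [vvar] mapping is installed with no struct pages behind it;
	 * the individual vvar, HPET and pvclock pages are wired up below
	 * with (io_)remap_pfn_range() as physical remappings instead.
	 */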
	struct pvclock_vsyscall_time_info *pvti;

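	/*
	 * image->sym_vvar_start is a negative offset: the vvar area sits
	 * immediately below the vDSO text.  The full mapping is therefore
	 * image->size - image->sym_vvar_start bytes long, with the vvar
	 * pages first and the text starting at addr - sym_vvar_start.
	 */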
	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

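	/*
	 * Populate the vvar area.  Each image->sym_* value below is an
	 * offset from the start of the vDSO text, so text_start + sym_*
	 * lands inside the [vvar] VMA installed above.
	 */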
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 text_start + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

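	/*
	 * If kvmclock has registered a pvclock page for vCPU 0, map it
	 * read-only as well so the vDSO clock_gettime() fast path can read
	 * the paravirtualized clock directly from user space.
	 */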
	pvti = pvclock_pvti_cpu0_va();
	if (pvti && image->sym_pvclock_page) {
		ret = remap_pfn_range(vma,
				      text_start + image->sym_pvclock_page,
				      __pa(pvti) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

		if (ret)
			goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
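/*
 * "vdso=" boot parameter: vdso=0 disables mapping of the 64-bit vDSO;
 * any nonzero value (the default is 1) enables it.
 */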
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
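	/*
	 * On RDTSCP-capable CPUs, stash (node << 12) | cpu in the
	 * IA32_TSC_AUX MSR; RDTSCP hands this value back in ECX, which is
	 * how the vDSO's vgetcpu fast path reads it.
	 */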
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu.  (12 bits for the CPU
	 * and 8 bits for the node)
	 */
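	/*
	 * A segment limit is 20 bits wide: limit0 carries bits 0-15 (the
	 * cpu in bits 0-11 plus the low node nibble) and limit carries
	 * bits 16-19 (the remaining node bits).
	 */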
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

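	/*
	 * User space recovers the encoded value with a single LSL on this
	 * segment; the vDSO's __getcpu() helper does roughly:
	 *
	 *	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	 *	cpu  = p & 0xfff;
	 *	node = p >> 12;
	 */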
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */