/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

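/*
 * Prepare a vdso image for use: populate the page array backing its
 * text mapping and patch in any alternative instructions.
 */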
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

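/*
 * Map a vdso image into the current process.  The mapping consists of
 * the vvar area followed by the vdso text: image->sym_vvar_start is
 * the (negative) offset of the vvar area from the start of the text,
 * so "addr" below is the base of the whole mapping and "text_start"
 * is where the image itself lands.
 */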
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

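	/*
	 * Expose the kernel's vvar page (shared kernel/user variables,
	 * e.g. for clock_gettime) read-only at its slot in the vvar area.
	 */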
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

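	/*
	 * If the machine has an HPET and the image reserves a slot for
	 * it, map the HPET registers uncached so the vdso timing code
	 * can read the counter without entering the kernel.
	 */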
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			text_start + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
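/*
 * Map the 32-bit vdso (native 32-bit or compat tasks).  Also record
 * the address of the image's SYSENTER_RETURN symbol so the syscall
 * entry code knows where to resume in user space after SYSENTER.
 */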
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
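/*
 * Called by the ELF loader for each new process: map the 64-bit vdso
 * at a randomized address, unless the vdso has been disabled.
 */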
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

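/*
 * Compat processes get either the x32 image or the 32-bit one,
 * depending on the task's ABI.
 */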
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
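/* "vdso=" boot parameter: "vdso=0" disables the 64-bit vdso. */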
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
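/*
 * Publish this CPU's number and NUMA node where the vdso's getcpu code
 * can read them cheaply: in the TSC_AUX MSR (for RDTSCP) and in the
 * limit field of a per-cpu GDT segment (for LSL).
 */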
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;	/* RO data, expand down, accessed */
	d.dpl = 3;	/* Visible to user code */
	d.s = 1;	/* Not a system segment */
	d.p = 1;	/* Present */
	d.d = 1;	/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

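/*
 * CPU hotplug callback: reprogram the getcpu state whenever a CPU
 * comes online, including when it is brought back after resume.
 */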
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

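/*
 * Boot-time setup: finish initializing the vdso images and program the
 * getcpu state on every present CPU, then register a notifier to catch
 * CPUs that come online later.
 */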
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */