/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
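/*
 * Toggled by the vdso= boot parameter (see vdso_setup() below);
 * nonzero means the vDSO is mapped into every new process.
 */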
unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;
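/*
 * Patch the vDSO image in place at boot: locate its .altinstructions
 * ELF section and hand it to apply_alternatives(), so the vDSO code
 * receives the same CPU-specific instruction rewriting as the kernel.
 */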
static void __init patch_vdso(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
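/*
 * Runs once at subsys_initcall time: patch the image, record its size,
 * and cache a struct page pointer for each vDSO page in vdso_pages[]
 * so arch_setup_additional_pages() can map them into each new process.
 */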
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i * PAGE_SIZE);

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still covered by the same page-table
 * page as the stack top. This doesn't give many random bits.
 */
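/*
 * Concretely, with 4K pages PTRS_PER_PTE == 512, so the random offset
 * below is 0..511 pages, i.e. at most PMD_SIZE (2MB) above the stack
 * top: roughly 9 bits of entropy.
 */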
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	/* This loses some more bits than a modulo would, but is cheaper. */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't align it
	 * again to the next page. addr can come in 4K-unaligned here as
	 * a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_addr(addr, NULL, ALIGN_VDSO);

	return addr;
}
/*
 * Set up a VMA at program startup for the vDSO image.
 * Not called for compat tasks.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;
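	/*
	 * Install the special mapping: a VMA backed by vdso_pages[] with
	 * the protections below. VM_ALWAYSDUMP ensures the vDSO pages are
	 * included in core dumps.
	 */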
	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}
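	/*
	 * Note: the success path also falls through to up_fail; ret is 0
	 * there, so the label just names the common unlock-and-return exit.
	 */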
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
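/*
 * Parse the vdso= boot option: "vdso=0" disables the mapping,
 * any nonzero value enables it (the default is 1).
 */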
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);