/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, int, int);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
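
/*
 * ELF_PAGESTART() rounds an address down to the start of its ELF page,
 * ELF_PAGEOFFSET() extracts the offset within that page, and
 * ELF_PAGEALIGN() rounds up to the next page boundary.  With a 4096-byte
 * ELF_MIN_ALIGN, for example, 0x1234 yields 0x1000, 0x234 and 0x2000
 * respectively.
 */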

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
	.hasvdso	= 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

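/*
 * Extend the brk area to cover [start, end): both ends are rounded out to
 * ELF page boundaries, any remaining gap is mapped as anonymous zero pages
 * via do_brk(), and the mm's brk pointers are updated to the new end.
 */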
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
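
/*
 * On a downward-growing stack STACK_ALLOC() moves sp down by len bytes and
 * yields the new, lower address; on an upward-growing stack it yields the
 * old address and advances sp past the reservation.  STACK_ROUND() keeps
 * the final stack pointer 16-byte aligned in both cases.
 */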

static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
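
/*
 * On return from create_elf_tables() the new stack holds, from the lowest
 * address upward: argc, the argv[] pointers and their NULL terminator, the
 * envp[] pointers and their NULL terminator, and finally the auxiliary
 * vector built in elf_info[], ending with AT_NULL.  (When an a.out
 * interpreter is used, pointers to the argv and envp arrays are inserted
 * right after argc.)  The argument and environment strings themselves were
 * placed at the top of the stack earlier by the exec core.
 */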

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
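
/*
 * An architecture can provide its own elf_map() by defining the macro
 * before this point; the generic version above just mmap()s the
 * file-backed part of the segment at the requested, ELF-page-aligned
 * address (or simply returns the page-aligned address when the segment has
 * no file data at all).
 */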

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		static int warn;
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
			printk(KERN_WARNING "a.out ELF interpreter %s is "
				"deprecated and will not be supported "
				"after Linux 2.6.25\n", elf_interpreter);
			warn++;
		}

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				/* don't leak the zero page on a failed write */
				free_page((unsigned long)buf);
				return 0;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		goto whole;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		if (get_user(word, header) == 0 && word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
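
/*
 * The value returned above is either 0 (skip the vma entirely), PAGE_SIZE
 * (dump just the first page, enough to identify the ELF header of a mapped
 * DSO or executable), or the full size of the vma, depending on the
 * MMF_DUMP_* filter bits passed in mm_flags.
 */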

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

#define DUMP_WRITE(addr, nr, foffset) \
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
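/*
 * These two macros are used by elf_core_dump() below: DUMP_WRITE() charges
 * every byte written against the core file size limit, and both bail out to
 * the end_coredump label on failure or once the limit is exceeded.
 */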
1327
Arjan van de Ven858119e2006-01-14 13:20:43 -08001328static void fill_elf_header(struct elfhdr *elf, int segs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329{
1330 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1331 elf->e_ident[EI_CLASS] = ELF_CLASS;
1332 elf->e_ident[EI_DATA] = ELF_DATA;
1333 elf->e_ident[EI_VERSION] = EV_CURRENT;
1334 elf->e_ident[EI_OSABI] = ELF_OSABI;
1335 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1336
1337 elf->e_type = ET_CORE;
1338 elf->e_machine = ELF_ARCH;
1339 elf->e_version = EV_CURRENT;
1340 elf->e_entry = 0;
1341 elf->e_phoff = sizeof(struct elfhdr);
1342 elf->e_shoff = 0;
1343 elf->e_flags = ELF_CORE_EFLAGS;
1344 elf->e_ehsize = sizeof(struct elfhdr);
1345 elf->e_phentsize = sizeof(struct elf_phdr);
1346 elf->e_phnum = segs;
1347 elf->e_shentsize = 0;
1348 elf->e_shnum = 0;
1349 elf->e_shstrndx = 0;
1350 return;
1351}
1352
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001353static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354{
1355 phdr->p_type = PT_NOTE;
1356 phdr->p_offset = offset;
1357 phdr->p_vaddr = 0;
1358 phdr->p_paddr = 0;
1359 phdr->p_filesz = sz;
1360 phdr->p_memsz = 0;
1361 phdr->p_flags = 0;
1362 phdr->p_align = 0;
1363 return;
1364}
1365
1366static void fill_note(struct memelfnote *note, const char *name, int type,
1367 unsigned int sz, void *data)
1368{
1369 note->name = name;
1370 note->type = type;
1371 note->datasz = sz;
1372 note->data = data;
1373 return;
1374}
1375
1376/*
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001377 * fill up all the fields in prstatus from the given task struct, except
1378 * registers which need to be filled up separately.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 */
1380static void fill_prstatus(struct elf_prstatus *prstatus,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001381 struct task_struct *p, long signr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382{
1383 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1384 prstatus->pr_sigpend = p->pending.signal.sig[0];
1385 prstatus->pr_sighold = p->blocked.sig[0];
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001386 prstatus->pr_pid = task_pid_vnr(p);
Roland McGrath45626bb2008-01-07 14:22:44 -08001387 prstatus->pr_ppid = task_pid_vnr(p->real_parent);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001388 prstatus->pr_pgrp = task_pgrp_vnr(p);
1389 prstatus->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (thread_group_leader(p)) {
1391 /*
1392 * This is the record for the group leader. Add in the
1393 * cumulative times of previous dead threads. This total
1394 * won't include the time of each live thread whose state
1395 * is included in the core dump. The final total reported
1396 * to our parent process when it calls wait4 will include
1397 * those sums as well as the little bit more time it takes
1398 * this and each other thread to finish dying after the
1399 * core dump synchronization phase.
1400 */
1401 cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
1402 &prstatus->pr_utime);
1403 cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
1404 &prstatus->pr_stime);
1405 } else {
1406 cputime_to_timeval(p->utime, &prstatus->pr_utime);
1407 cputime_to_timeval(p->stime, &prstatus->pr_stime);
1408 }
1409 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1410 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1411}
1412
1413static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1414 struct mm_struct *mm)
1415{
Greg Kroah-Hartmana84a5052005-05-11 00:10:44 -07001416 unsigned int i, len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 /* first copy the parameters from user space */
1419 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1420
1421 len = mm->arg_end - mm->arg_start;
1422 if (len >= ELF_PRARGSZ)
1423 len = ELF_PRARGSZ-1;
1424 if (copy_from_user(&psinfo->pr_psargs,
1425 (const char __user *)mm->arg_start, len))
1426 return -EFAULT;
1427 for(i = 0; i < len; i++)
1428 if (psinfo->pr_psargs[i] == 0)
1429 psinfo->pr_psargs[i] = ' ';
1430 psinfo->pr_psargs[len] = 0;
1431
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001432 psinfo->pr_pid = task_pid_vnr(p);
Roland McGrath45626bb2008-01-07 14:22:44 -08001433 psinfo->pr_ppid = task_pid_vnr(p->real_parent);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001434 psinfo->pr_pgrp = task_pgrp_vnr(p);
1435 psinfo->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
1437 i = p->state ? ffz(~p->state) + 1 : 0;
1438 psinfo->pr_state = i;
Carsten Otte55148542006-03-25 03:08:22 -08001439 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1441 psinfo->pr_nice = task_nice(p);
1442 psinfo->pr_flag = p->flags;
1443 SET_UID(psinfo->pr_uid, p->uid);
1444 SET_GID(psinfo->pr_gid, p->gid);
1445 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1446
1447 return 0;
1448}
1449
1450/* Here is the structure in which status of each thread is captured. */
1451struct elf_thread_status
1452{
1453 struct list_head list;
1454 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1455 elf_fpregset_t fpu; /* NT_PRFPREG */
1456 struct task_struct *thread;
1457#ifdef ELF_CORE_COPY_XFPREGS
Mark Nelson5b20cd82007-10-16 23:25:39 -07001458 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459#endif
1460 struct memelfnote notes[3];
1461 int num_notes;
1462};
1463
1464/*
1465 * In order to add the specific thread information for the elf file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001466 * we need to keep a linked list of every threads pr_status and then create
1467 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 */
1469static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1470{
1471 int sz = 0;
1472 struct task_struct *p = t->thread;
1473 t->num_notes = 0;
1474
1475 fill_prstatus(&t->prstatus, p, signr);
1476 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1477
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001478 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1479 &(t->prstatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 t->num_notes++;
1481 sz += notesize(&t->notes[0]);
1482
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001483 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1484 &t->fpu))) {
1485 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1486 &(t->fpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 t->num_notes++;
1488 sz += notesize(&t->notes[1]);
1489 }
1490
1491#ifdef ELF_CORE_COPY_XFPREGS
1492 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
Mark Nelson5b20cd82007-10-16 23:25:39 -07001493 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1494 sizeof(t->xfpu), &t->xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 t->num_notes++;
1496 sz += notesize(&t->notes[2]);
1497 }
1498#endif
1499 return sz;
1500}
1501
Roland McGrathf47aef52007-01-26 00:56:49 -08001502static struct vm_area_struct *first_vma(struct task_struct *tsk,
1503 struct vm_area_struct *gate_vma)
1504{
1505 struct vm_area_struct *ret = tsk->mm->mmap;
1506
1507 if (ret)
1508 return ret;
1509 return gate_vma;
1510}
1511/*
1512 * Helper function for iterating across a vma list. It ensures that the caller
1513 * will visit `gate_vma' prior to terminating the search.
1514 */
1515static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
1516 struct vm_area_struct *gate_vma)
1517{
1518 struct vm_area_struct *ret;
1519
1520 ret = this_vma->vm_next;
1521 if (ret)
1522 return ret;
1523 if (this_vma == gate_vma)
1524 return NULL;
1525 return gate_vma;
1526}
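/*
 * first_vma()/next_vma() together walk mm->mmap and then visit gate_vma
 * exactly once, so the program-header and data loops below cover every
 * user mapping plus the gate page when the architecture provides one.
 */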
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528/*
1529 * Actual dumper
1530 *
 1531 * This is a two-pass process: first we lay out the file and record the
 1532 * offset of each piece, then the pieces are actually written out. If we
 1533 * run out of core limit we just truncate.
1534 */
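/*
 * Resulting file layout, in the order it is laid out and written below:
 * the ELF header, one program header for the notes plus one PT_LOAD
 * program header per mapping, the note data itself, and finally the
 * mapping contents starting at an ELF_EXEC_PAGESIZE-aligned offset.
 */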
Neil Horman7dc0b222007-10-16 23:26:34 -07001535static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536{
1537#define NUM_NOTES 6
1538 int has_dumped = 0;
1539 mm_segment_t fs;
1540 int segs;
1541 size_t size = 0;
1542 int i;
Roland McGrathf47aef52007-01-26 00:56:49 -08001543 struct vm_area_struct *vma, *gate_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 struct elfhdr *elf = NULL;
Andi Kleend025c9d2006-09-30 23:29:28 -07001545 loff_t offset = 0, dataoff, foffset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 int numnote;
1547 struct memelfnote *notes = NULL;
1548 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1549 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1550 struct task_struct *g, *p;
1551 LIST_HEAD(thread_list);
1552 struct list_head *t;
1553 elf_fpregset_t *fpu = NULL;
1554#ifdef ELF_CORE_COPY_XFPREGS
1555 elf_fpxregset_t *xfpu = NULL;
1556#endif
1557 int thread_status_size = 0;
1558 elf_addr_t *auxv;
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001559 unsigned long mm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
1561 /*
1562 * We no longer stop all VM operations.
1563 *
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001564 * This is because those processes that could possibly change map_count
 1565 * or the mmap / vma pages are now blocked in do_exit until current
 1566 * has finished this core dump.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 *
1568 * Only ptrace can touch these memory addresses, but it doesn't change
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001569 * the map_count or the pages allocated. So no possibility of crashing
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 * exists while dumping the mm->vm_next areas to the core file.
1571 */
1572
1573 /* alloc memory for large data structures: too large to be on stack */
1574 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1575 if (!elf)
1576 goto cleanup;
1577 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1578 if (!prstatus)
1579 goto cleanup;
1580 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1581 if (!psinfo)
1582 goto cleanup;
1583 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1584 if (!notes)
1585 goto cleanup;
1586 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1587 if (!fpu)
1588 goto cleanup;
1589#ifdef ELF_CORE_COPY_XFPREGS
1590 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1591 if (!xfpu)
1592 goto cleanup;
1593#endif
1594
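	/*
	 * When dumping because of a signal, record every other thread that
	 * shares this mm so its registers can be emitted as extra per-thread
	 * notes; the note sizes are totalled now so that the notes program
	 * header below can be sized correctly.
	 */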
1595 if (signr) {
1596 struct elf_thread_status *tmp;
Oleg Nesterov486ccb02006-09-29 02:00:24 -07001597 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 do_each_thread(g,p)
1599 if (current->mm == p->mm && current != p) {
Oliver Neukum11b0b5a2006-03-25 03:08:13 -08001600 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 if (!tmp) {
Oleg Nesterov486ccb02006-09-29 02:00:24 -07001602 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 goto cleanup;
1604 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 tmp->thread = p;
1606 list_add(&tmp->list, &thread_list);
1607 }
1608 while_each_thread(g,p);
Oleg Nesterov486ccb02006-09-29 02:00:24 -07001609 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 list_for_each(t, &thread_list) {
1611 struct elf_thread_status *tmp;
1612 int sz;
1613
1614 tmp = list_entry(t, struct elf_thread_status, list);
1615 sz = elf_dump_thread_status(signr, tmp);
1616 thread_status_size += sz;
1617 }
1618 }
 1619 /* now collect the dump info for the current task */
1620 memset(prstatus, 0, sizeof(*prstatus));
1621 fill_prstatus(prstatus, current, signr);
1622 elf_core_copy_regs(&prstatus->pr_reg, regs);
1623
1624 segs = current->mm->map_count;
1625#ifdef ELF_CORE_EXTRA_PHDRS
1626 segs += ELF_CORE_EXTRA_PHDRS;
1627#endif
1628
Roland McGrathf47aef52007-01-26 00:56:49 -08001629 gate_vma = get_gate_vma(current);
1630 if (gate_vma != NULL)
1631 segs++;
1632
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 /* Set up header */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001634 fill_elf_header(elf, segs + 1); /* including notes section */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635
1636 has_dumped = 1;
1637 current->flags |= PF_DUMPCORE;
1638
1639 /*
1640 * Set up the notes in similar form to SVR4 core dumps made
1641 * with info from their /proc.
1642 */
1643
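	/*
	 * Notes emitted for the dumping process itself: NT_PRSTATUS and
	 * NT_PRPSINFO always, NT_AUXV for the saved auxiliary vector, an
	 * NT_PRFPREG note if the FPU state can be copied, and the
	 * architecture's extended FP note when ELF_CORE_COPY_XFPREGS is set.
	 */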
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001644 fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 fill_psinfo(psinfo, current->group_leader, current->mm);
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001646 fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
Eric W. Biedermana9289722005-10-30 15:02:08 -08001648 numnote = 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001650 auxv = (elf_addr_t *)current->mm->saved_auxv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
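	/*
	 * saved_auxv is a flat array of (a_type, a_val) pairs terminated by
	 * an AT_NULL entry; the loop below counts elf_addr_t words including
	 * the terminator so the whole vector can be dumped as one NT_AUXV
	 * note.
	 */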
1652 i = 0;
1653 do
1654 i += 2;
1655 while (auxv[i - 2] != AT_NULL);
1656 fill_note(&notes[numnote++], "CORE", NT_AUXV,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001657 i * sizeof(elf_addr_t), auxv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
1659 /* Try to dump the FPU. */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001660 if ((prstatus->pr_fpvalid =
1661 elf_core_copy_task_fpregs(current, regs, fpu)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 fill_note(notes + numnote++,
1663 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1664#ifdef ELF_CORE_COPY_XFPREGS
1665 if (elf_core_copy_task_xfpregs(current, xfpu))
1666 fill_note(notes + numnote++,
Mark Nelson5b20cd82007-10-16 23:25:39 -07001667 "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668#endif
1669
1670 fs = get_fs();
1671 set_fs(KERNEL_DS);
1672
1673 DUMP_WRITE(elf, sizeof(*elf));
1674 offset += sizeof(*elf); /* Elf header */
Petr Vandroveca7a0d862006-10-13 18:42:07 +02001675 offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
1676 foffset = offset;
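	/*
	 * offset is the file position assigned to each piece during layout;
	 * foffset follows the position actually written so far, so that
	 * DUMP_SEEK(dataoff - foffset) below can pad the file out to the
	 * page-aligned start of the segment data.
	 */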
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 /* Write notes phdr entry */
1679 {
1680 struct elf_phdr phdr;
1681 int sz = 0;
1682
1683 for (i = 0; i < numnote; i++)
1684 sz += notesize(notes + i);
1685
1686 sz += thread_status_size;
1687
Michael Ellermane5501492007-09-19 14:38:12 +10001688 sz += elf_coredump_extra_notes_size();
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 fill_elf_note_phdr(&phdr, sz, offset);
1691 offset += sz;
1692 DUMP_WRITE(&phdr, sizeof(phdr));
1693 }
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1696
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001697 /*
1698 * We must use the same mm->flags while dumping core to avoid
 1699 * inconsistency between the program headers and bodies; otherwise an
1700 * unusable core file can be generated.
1701 */
1702 mm_flags = current->mm->flags;
1703
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 /* Write program headers for segments dump */
Roland McGrathf47aef52007-01-26 00:56:49 -08001705 for (vma = first_vma(current, gate_vma); vma != NULL;
1706 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 struct elf_phdr phdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 phdr.p_type = PT_LOAD;
1710 phdr.p_offset = offset;
1711 phdr.p_vaddr = vma->vm_start;
1712 phdr.p_paddr = 0;
Roland McGrath82df3972007-10-16 23:27:02 -07001713 phdr.p_filesz = vma_dump_size(vma, mm_flags);
1714 phdr.p_memsz = vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 offset += phdr.p_filesz;
1716 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001717 if (vma->vm_flags & VM_WRITE)
1718 phdr.p_flags |= PF_W;
1719 if (vma->vm_flags & VM_EXEC)
1720 phdr.p_flags |= PF_X;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 phdr.p_align = ELF_EXEC_PAGESIZE;
1722
1723 DUMP_WRITE(&phdr, sizeof(phdr));
1724 }
1725
1726#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1727 ELF_CORE_WRITE_EXTRA_PHDRS;
1728#endif
1729
1730 /* write out the notes section */
1731 for (i = 0; i < numnote; i++)
Andi Kleend025c9d2006-09-30 23:29:28 -07001732 if (!writenote(notes + i, file, &foffset))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 goto end_coredump;
1734
Michael Ellermane5501492007-09-19 14:38:12 +10001735 if (elf_coredump_extra_notes_write(file, &foffset))
1736 goto end_coredump;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 /* write out the thread status notes section */
1739 list_for_each(t, &thread_list) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001740 struct elf_thread_status *tmp =
1741 list_entry(t, struct elf_thread_status, list);
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 for (i = 0; i < tmp->num_notes; i++)
Andi Kleend025c9d2006-09-30 23:29:28 -07001744 if (!writenote(&tmp->notes[i], file, &foffset))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 goto end_coredump;
1746 }
Andi Kleend025c9d2006-09-30 23:29:28 -07001747
1748 /* Align to page */
1749 DUMP_SEEK(dataoff - foffset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
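	/*
	 * Dump the selected contents of each mapping page by page.  Pages
	 * that cannot be brought in, as well as the shared zero page, are
	 * emitted as holes via a seek instead of written data; exceeding
	 * the core limit aborts the dump.
	 */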
Roland McGrathf47aef52007-01-26 00:56:49 -08001751 for (vma = first_vma(current, gate_vma); vma != NULL;
1752 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 unsigned long addr;
Roland McGrath82df3972007-10-16 23:27:02 -07001754 unsigned long end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755
Roland McGrath82df3972007-10-16 23:27:02 -07001756 end = vma->vm_start + vma_dump_size(vma, mm_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
Roland McGrath82df3972007-10-16 23:27:02 -07001758 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001759 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 struct vm_area_struct *vma;
1761
1762 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1763 &page, &vma) <= 0) {
Andi Kleend025c9d2006-09-30 23:29:28 -07001764 DUMP_SEEK(PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 } else {
Nick Piggin557ed1f2007-10-16 01:24:40 -07001766 if (page == ZERO_PAGE(0)) {
Brian Pomerantz03221702007-04-01 23:49:41 -07001767 if (!dump_seek(file, PAGE_SIZE)) {
1768 page_cache_release(page);
1769 goto end_coredump;
1770 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 } else {
1772 void *kaddr;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001773 flush_cache_page(vma, addr,
1774 page_to_pfn(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 kaddr = kmap(page);
1776 if ((size += PAGE_SIZE) > limit ||
1777 !dump_write(file, kaddr,
1778 PAGE_SIZE)) {
1779 kunmap(page);
1780 page_cache_release(page);
1781 goto end_coredump;
1782 }
1783 kunmap(page);
1784 }
1785 page_cache_release(page);
1786 }
1787 }
1788 }
1789
1790#ifdef ELF_CORE_WRITE_EXTRA_DATA
1791 ELF_CORE_WRITE_EXTRA_DATA;
1792#endif
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794end_coredump:
1795 set_fs(fs);
1796
1797cleanup:
Jesper Juhl74da6cd2006-01-11 01:51:26 +01001798 while (!list_empty(&thread_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 struct list_head *tmp = thread_list.next;
1800 list_del(tmp);
1801 kfree(list_entry(tmp, struct elf_thread_status, list));
1802 }
1803
1804 kfree(elf);
1805 kfree(prstatus);
1806 kfree(psinfo);
1807 kfree(notes);
1808 kfree(fpu);
1809#ifdef ELF_CORE_COPY_XFPREGS
1810 kfree(xfpu);
1811#endif
1812 return has_dumped;
1813#undef NUM_NOTES
1814}
1815
1816#endif /* USE_ELF_CORE_DUMP */
1817
1818static int __init init_elf_binfmt(void)
1819{
1820 return register_binfmt(&elf_format);
1821}
1822
1823static void __exit exit_elf_binfmt(void)
1824{
1825 /* Remove the COFF and ELF loaders. */
1826 unregister_binfmt(&elf_format);
1827}
1828
1829core_initcall(init_elf_binfmt);
1830module_exit(exit_elf_binfmt);
1831MODULE_LICENSE("GPL");