blob: 92c00a13b28bd6c560a77ad0637acbb762661f73 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/mm.h>
16#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/binfmts.h>
20#include <linux/string.h>
21#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/personality.h>
24#include <linux/elfcore.h>
25#include <linux/init.h>
26#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/compiler.h>
28#include <linux/highmem.h>
29#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070030#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070033#include <linux/elf.h>
Kees Cookd1fd8362015-04-14 15:48:07 -070034#include <linux/elf-randomize.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070035#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080036#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010037#include <linux/sched.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +010038#include <linux/sched/coredump.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010039#include <linux/sched/task_stack.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +010040#include <linux/cred.h>
Ross Zwisler50378352015-10-05 16:33:36 -060041#include <linux/dax.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080042#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <asm/param.h>
44#include <asm/page.h>
45
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070046#ifndef user_long_t
47#define user_long_t long
48#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070049#ifndef user_siginfo_t
50#define user_siginfo_t siginfo_t
51#endif
52
Al Viro71613c32012-10-20 22:00:48 -040053static int load_elf_binary(struct linux_binprm *bprm);
Andrew Mortonbb1ad822008-01-30 13:31:07 +010054static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
55 int, int, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Josh Triplett69369a72014-04-03 14:48:27 -070057#ifdef CONFIG_USELIB
58static int load_elf_library(struct file *);
59#else
60#define load_elf_library NULL
61#endif
62
Linus Torvalds1da177e2005-04-16 15:20:36 -070063/*
64 * If we don't support core dumping, then supply a NULL so we
65 * don't even try.
66 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080067#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080068static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069#else
70#define elf_core_dump NULL
71#endif
72
73#if ELF_EXEC_PAGESIZE > PAGE_SIZE
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070074#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070075#else
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070076#define ELF_MIN_ALIGN PAGE_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#endif
78
79#ifndef ELF_CORE_EFLAGS
80#define ELF_CORE_EFLAGS 0
81#endif
82
83#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
84#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
85#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
86
/*
 * Registration record for the ELF binary format handler.
 * load_shlib is NULL unless CONFIG_USELIB is set, and core_dump is NULL
 * unless CONFIG_ELF_CORE is set (see the #defines above).
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	/* minimum size for a core dump of an ELF process */
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
94
Andrew Mortond4e3cc32007-07-21 04:37:32 -070095#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -070096
Denys Vlasenko16e72e92017-02-22 15:45:16 -080097static int set_brk(unsigned long start, unsigned long end, int prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -070098{
99 start = ELF_PAGEALIGN(start);
100 end = ELF_PAGEALIGN(end);
101 if (end > start) {
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800102 /*
103 * Map the last of the bss segment.
104 * If the header is requesting these pages to be
105 * executable, honour that (ppc32 needs this).
106 */
107 int error = vm_brk_flags(start, end - start,
108 prot & PROT_EXEC ? VM_EXEC : 0);
Linus Torvalds5d22fc22016-05-27 15:57:31 -0700109 if (error)
110 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 }
112 current->mm->start_brk = current->mm->brk = end;
113 return 0;
114}
115
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116/* We need to explicitly zero any fractional pages
117 after the data section (i.e. bss). This would
118 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700119 be in memory
120 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121static int padzero(unsigned long elf_bss)
122{
123 unsigned long nbyte;
124
125 nbyte = ELF_PAGEOFFSET(elf_bss);
126 if (nbyte) {
127 nbyte = ELF_MIN_ALIGN - nbyte;
128 if (clear_user((void __user *) elf_bss, nbyte))
129 return -EFAULT;
130 }
131 return 0;
132}
133
Ohad Ben-Cohen09c6dd32008-02-03 18:05:15 +0200134/* Let's use some macros to make this stack manipulation a little clearer */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135#ifdef CONFIG_STACK_GROWSUP
136#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
137#define STACK_ROUND(sp, items) \
138 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700139#define STACK_ALLOC(sp, len) ({ \
140 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
141 old_sp; })
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142#else
143#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
144#define STACK_ROUND(sp, items) \
145 (((unsigned long) (sp - items)) &~ 15UL)
146#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
147#endif
148
Nathan Lynch483fad12008-07-22 04:48:46 +1000149#ifndef ELF_BASE_PLATFORM
150/*
151 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
152 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
153 * will be copied to the user stack in the same manner as AT_PLATFORM.
154 */
155#define ELF_BASE_PLATFORM NULL
156#endif
157
/*
 * create_elf_tables() - lay out the new program's initial userspace stack
 * @bprm:             binprm carrying the stack pointer, argc/envc and exec path
 * @exec:             ELF header of the binary being executed
 * @load_addr:        address the binary's program headers are visible at
 *                    (used for AT_PHDR)
 * @interp_load_addr: base address the interpreter was mapped at (AT_BASE)
 *
 * Builds everything that sits above the already-copied argument and
 * environment strings on the initial stack: the optional platform
 * strings, 16 random bytes for AT_RANDOM, the auxiliary vector, argc,
 * and the argv[] / envp[] pointer arrays.  The aux vector is first
 * assembled in current->mm->saved_auxv and then copied to the user
 * stack at the end.
 *
 * Returns 0 on success, -EFAULT on a failed user-space access, or
 * -EINVAL if an argument/environment string is unterminated or too long.
 */
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace (exposed as AT_BASE_PLATFORM).
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding (AT_RANDOM).
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info in saved_auxv first. */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	/* IDs are translated into the mm's user namespace view. */
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest of saved_auxv too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	/* argc slot + argv pointers + NULL + envp pointers + NULL */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	argv = sp;
	envp = argv + argc + 1;

	/*
	 * Populate argv and envp: walk the strings already copied at
	 * arg_start and record a user pointer to each one.
	 */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info (aux vector) on the stack in the right place. */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
342
James Hoganc07380b2011-05-09 10:58:40 +0100343#ifndef elf_map
344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345static unsigned long elf_map(struct file *filep, unsigned long addr,
Jiri Kosinacc503c12008-01-30 13:31:07 +0100346 struct elf_phdr *eppnt, int prot, int type,
347 unsigned long total_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348{
349 unsigned long map_addr;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100350 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
351 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
352 addr = ELF_PAGESTART(addr);
353 size = ELF_PAGEALIGN(size);
Jan Kratochvil60bfba72007-07-15 23:40:06 -0700354
Andrew Mortond4e3cc32007-07-21 04:37:32 -0700355 /* mmap() will return -EINVAL if given a zero size, but a
356 * segment with zero filesize is perfectly valid */
Jiri Kosinacc503c12008-01-30 13:31:07 +0100357 if (!size)
358 return addr;
359
Jiri Kosinacc503c12008-01-30 13:31:07 +0100360 /*
361 * total_size is the size of the ELF (interpreter) image.
362 * The _first_ mmap needs to know the full size, otherwise
363 * randomization might put this image into an overlapping
364 * position with the ELF binary image. (since size < total_size)
365 * So we first map the 'big' image - and unmap the remainder at
366 * the end. (which unmap is needed for ELF images with holes.)
367 */
368 if (total_size) {
369 total_size = ELF_PAGEALIGN(total_size);
Al Viro5a5e4c22012-05-30 01:49:38 -0400370 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100371 if (!BAD_ADDR(map_addr))
Al Viro5a5e4c22012-05-30 01:49:38 -0400372 vm_munmap(map_addr+size, total_size-size);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100373 } else
Al Viro5a5e4c22012-05-30 01:49:38 -0400374 map_addr = vm_mmap(filep, addr, size, prot, type, off);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100375
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 return(map_addr);
377}
378
James Hoganc07380b2011-05-09 10:58:40 +0100379#endif /* !elf_map */
380
Jiri Kosinacc503c12008-01-30 13:31:07 +0100381static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
382{
383 int i, first_idx = -1, last_idx = -1;
384
385 for (i = 0; i < nr; i++) {
386 if (cmds[i].p_type == PT_LOAD) {
387 last_idx = i;
388 if (first_idx == -1)
389 first_idx = i;
390 }
391 }
392 if (first_idx == -1)
393 return 0;
394
395 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
396 ELF_PAGESTART(cmds[first_idx].p_vaddr);
397}
398
Paul Burton6a8d3892014-09-11 08:30:14 +0100399/**
400 * load_elf_phdrs() - load ELF program headers
401 * @elf_ex: ELF header of the binary whose program headers should be loaded
402 * @elf_file: the opened ELF binary file
403 *
404 * Loads ELF program headers from the binary file elf_file, which has the ELF
405 * header pointed to by elf_ex, into a newly allocated array. The caller is
406 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
407 */
408static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
409 struct file *elf_file)
410{
411 struct elf_phdr *elf_phdata = NULL;
412 int retval, size, err = -1;
413
414 /*
415 * If the size of this structure has changed, then punt, since
416 * we will be doing the wrong thing.
417 */
418 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
419 goto out;
420
421 /* Sanity check the number of program headers... */
422 if (elf_ex->e_phnum < 1 ||
423 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
424 goto out;
425
426 /* ...and their total size. */
427 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
428 if (size > ELF_MIN_ALIGN)
429 goto out;
430
431 elf_phdata = kmalloc(size, GFP_KERNEL);
432 if (!elf_phdata)
433 goto out;
434
435 /* Read in the program headers */
436 retval = kernel_read(elf_file, elf_ex->e_phoff,
437 (char *)elf_phdata, size);
438 if (retval != size) {
439 err = (retval < 0) ? retval : -EIO;
440 goto out;
441 }
442
443 /* Success! */
444 err = 0;
445out:
446 if (err) {
447 kfree(elf_phdata);
448 elf_phdata = NULL;
449 }
450 return elf_phdata;
451}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100452
Paul Burton774c1052014-09-11 08:30:16 +0100453#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
454
/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

/* Initializer for the dummy state: nothing to set up. */
#define INIT_ARCH_ELF_STATE {}
470
/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr: The main ELF header
 * @phdr: The program header to check
 * @elf: The open ELF file
 * @is_interp: True if the phdr is from the interpreter of the ELF being
 *             loaded, else false.
 * @state: Architecture-specific state preserved throughout the process
 *         of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
497
/**
 * arch_check_elf() - check an ELF executable
 * @ehdr: The main ELF header
 * @has_interp: True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state: Architecture-specific state preserved throughout the process
 *         of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
520
521#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522
523/* This is much more generalized than the library routine read function,
524 so we keep this separate. Technically the library read function
525 is only provided so that we can read a.out libraries that have
526 an ELF header */
527
/*
 * load_elf_interp() - map the ELF interpreter (dynamic linker)
 * @interp_elf_ex:     the interpreter's ELF header
 * @interpreter:       the opened interpreter file
 * @interp_map_addr:   out: address of the interpreter's first mapping
 * @no_base:           if non-zero, bias an ET_DYN interpreter so that
 *                     its first segment lands at the caller's choice
 *                     rather than letting mmap pick freely
 * @interp_elf_phdata: the interpreter's already-loaded program headers
 *
 * Maps every PT_LOAD segment of the interpreter, then zeroes the
 * partial page after the file-backed data and allocates the remaining
 * bss with vm_brk_flags().  Returns the interpreter's load bias
 * (load_addr) on success, or an error value (checked by the caller
 * with BAD_ADDR/IS_ERR-style tests; initialized to ~0UL).
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			/* Translate PF_* segment flags into PROT_* bits. */
			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			/*
			 * The first elf_map() call gets total_size so the
			 * whole image extent is reserved at once; later
			 * calls pass 0 (see elf_map()).
			 */
			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				/* remember the protection of the bss tail */
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
654
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655/*
656 * These are the functions used to load ELF style executables and shared
657 * libraries. There is no binary dependent code anywhere else.
658 */
659
Andi Kleen913bd902006-03-25 16:29:09 +0100660#ifndef STACK_RND_MASK
James Bottomleyd1cabd62007-03-16 13:38:35 -0800661#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
Andi Kleen913bd902006-03-25 16:29:09 +0100662#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663
664static unsigned long randomize_stack_top(unsigned long stack_top)
665{
Hector Marco-Gisbert4e7c22d2015-02-14 09:33:50 -0800666 unsigned long random_variable = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667
Andi Kleenc16b63e02006-09-26 10:52:28 +0200668 if ((current->flags & PF_RANDOMIZE) &&
669 !(current->personality & ADDR_NO_RANDOMIZE)) {
Daniel Cashman5ef11c32016-02-26 15:19:37 -0800670 random_variable = get_random_long();
Hector Marco-Gisbert4e7c22d2015-02-14 09:33:50 -0800671 random_variable &= STACK_RND_MASK;
Andi Kleen913bd902006-03-25 16:29:09 +0100672 random_variable <<= PAGE_SHIFT;
673 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674#ifdef CONFIG_STACK_GROWSUP
Andi Kleen913bd902006-03-25 16:29:09 +0100675 return PAGE_ALIGN(stack_top) + random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676#else
Andi Kleen913bd902006-03-25 16:29:09 +0100677 return PAGE_ALIGN(stack_top) - random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678#endif
679}
680
/*
 * load_elf_binary - load an ELF executable (ET_EXEC or ET_DYN) for execve()
 * @bprm: binary parameter block; bprm->buf already holds the first bytes
 *        of the file (the ELF header), bprm->file is the open executable.
 *
 * Validates the ELF header, reads the program headers, optionally opens
 * the PT_INTERP dynamic linker, tears down the old mm via flush_old_exec(),
 * maps every PT_LOAD segment, sets up bss/brk, builds the argv/envp/auxv
 * tables on the new stack and starts the thread at the chosen entry point.
 *
 * Returns 0 on success or a negative errno.  Errors after flush_old_exec()
 * succeeds cannot restore the old image; the exec machinery handles that.
 */
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct pt_regs *regs = current_pt_regs();
	/* Scratch copies of the main and interpreter ELF headers; heap
	 * allocated to keep two struct elfhdr off the kernel stack. */
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	/* Read the full program-header table; kfree'd at out_free_ph. */
	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* First pass: find PT_INTERP and open/read the interpreter. */
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX || 
			    elf_ppnt->p_filesz < 2)
				goto out_free_ph;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_ph;

			/* kernel_read() returns the number of bytes read
			 * (or a negative errno), hence the != check. */
			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			would_dump(bprm, interpreter);

			/* Get the exec headers */
			retval = kernel_read(interpreter, 0,
					     (void *)&loc->interp_elf_ex,
					     sizeof(loc->interp_elf_ex));
			if (retval != sizeof(loc->interp_elf_ex)) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Only one PT_INTERP is honoured. */
			break;
		}
		elf_ppnt++;
	}

	/* Second pass: stack executability and arch-specific headers. */
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(&loc->elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Past this point the old mm is gone: any later failure aborts
	 * the exec rather than resuming the previous program. */

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(loc->elf_ex, &arch_state);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;
	
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;
	            
			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		/* Translate segment PF_* flags into mmap PROT_* bits. */
		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_ET_DYN_BASE - vaddr;
			if (current->flags & PF_RANDOMIZE)
				load_bias += arch_mmap_rnd();
			load_bias = ELF_PAGESTART(load_bias);
			total_size = total_mapping_size(elf_phdata,
							loc->elf_ex.e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				/* Fold the address the kernel actually chose
				 * back into the bias for later segments. */
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			/* Remember the protection of the segment that owns
			 * the highest brk so the bss is mapped to match. */
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	/* Rebase all link-time addresses by the final load bias. */
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		unsigned long interp_map_addr = 0;

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		/* Interpreter mapped: drop our reference and path copy. */
		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0)
		goto out;
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup - labels are ordered so that each entry point
	 * releases everything acquired after the next label's resource */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
1126
Josh Triplett69369a72014-04-03 14:48:27 -07001127#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header.  Used by sys_uselib();
   the library must be ET_EXEC with exactly one PT_LOAD segment.
   Returns 0 on success or a negative errno. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	/* Require exactly one loadable segment... */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	/* ...and point eppnt at it (guaranteed to exist by j == 1). */
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page (start of bss). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	/* Map any remaining bss pages beyond the file-backed region. */
	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
Josh Triplett69369a72014-04-03 14:48:27 -07001209#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001211#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212/*
1213 * ELF core dumper
1214 *
1215 * Modelled on fs/exec.c:aout_core_dump()
1216 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1217 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219/*
Jason Baron909af762012-03-23 15:02:51 -07001220 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1221 * that are useful for post-mortem analysis are included in every core dump.
1222 * In that way we ensure that the core dump is fully interpretable later
1223 * without matching up the same kernel and hardware config to see what PC values
1224 * meant. These special mappings include - vDSO, vsyscall, and other
1225 * architecture specific mappings
1226 */
1227static bool always_dump_vma(struct vm_area_struct *vma)
1228{
1229 /* Any vsyscall mappings? */
1230 if (vma == get_gate_vma(vma->vm_mm))
1231 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001232
1233 /*
1234 * Assume that all vmas with a .name op should always be dumped.
1235 * If this changes, a new vm_ops field can easily be added.
1236 */
1237 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1238 return true;
1239
Jason Baron909af762012-03-23 15:02:51 -07001240 /*
1241 * arch_vma_name() returns non-NULL for special architecture mappings,
1242 * such as vDSO sections.
1243 */
1244 if (arch_vma_name(vma))
1245 return true;
1246
1247 return false;
1248}
1249
/*
 * Decide what to dump of a segment: all of it, part of it, or none.
 * @mm_flags carries the MMF_DUMP_* coredump filter bits; returns the
 * number of bytes of @vma to write to the core file (0, PAGE_SIZE for
 * an ELF-header-only dump, or the whole vma length).
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* Test one MMF_DUMP_* filter bit from mm_flags. */
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0 means a deleted/anonymous backing file. */
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
1345
/* An ELF note in memory: staged by fill_note() and serialized to the
 * core file by writenote()/notesize(). */
struct memelfnote
{
	const char *name;	/* NUL-terminated note name, e.g. "CORE" */
	int type;		/* note type (n_type), e.g. NT_PRSTATUS */
	unsigned int datasz;	/* size of the payload at @data, in bytes */
	void *data;		/* payload written after the note header */
};
1354
1355static int notesize(struct memelfnote *en)
1356{
1357 int sz;
1358
1359 sz = sizeof(struct elf_note);
1360 sz += roundup(strlen(en->name) + 1, 4);
1361 sz += roundup(en->datasz, 4);
1362
1363 return sz;
1364}
1365
Al Viroecc8c772013-10-05 15:32:35 -04001366static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367{
1368 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 en.n_namesz = strlen(men->name) + 1;
1370 en.n_descsz = men->datasz;
1371 en.n_type = men->type;
1372
Al Viroecc8c772013-10-05 15:32:35 -04001373 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001374 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1375 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
Roland McGrath3aba4812008-01-30 13:31:44 +01001378static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001379 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001381 memset(elf, 0, sizeof(*elf));
1382
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1384 elf->e_ident[EI_CLASS] = ELF_CLASS;
1385 elf->e_ident[EI_DATA] = ELF_DATA;
1386 elf->e_ident[EI_VERSION] = EV_CURRENT;
1387 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001390 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001393 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 elf->e_ehsize = sizeof(struct elfhdr);
1395 elf->e_phentsize = sizeof(struct elf_phdr);
1396 elf->e_phnum = segs;
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return;
1399}
1400
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001401static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402{
1403 phdr->p_type = PT_NOTE;
1404 phdr->p_offset = offset;
1405 phdr->p_vaddr = 0;
1406 phdr->p_paddr = 0;
1407 phdr->p_filesz = sz;
1408 phdr->p_memsz = 0;
1409 phdr->p_flags = 0;
1410 phdr->p_align = 0;
1411 return;
1412}
1413
1414static void fill_note(struct memelfnote *note, const char *name, int type,
1415 unsigned int sz, void *data)
1416{
1417 note->name = name;
1418 note->type = type;
1419 note->datasz = sz;
1420 note->data = data;
1421 return;
1422}
1423
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 * @signr is recorded as both the current and pending signal number.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	/* Only the first word of the pending/blocked sets is reported. */
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; hold the read lock for the deref. */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	/* All ids are translated into the task's own pid namespace. */
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* A non-leader thread reports only its own CPU time. */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}

	/* Accumulated times of reaped children of the whole group. */
	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
}
1461
/*
 * Fill the NT_PRPSINFO record: command line, credentials, scheduling
 * state and identifiers of the dumped process.
 * Returns 0 on success, -EFAULT if the argument area cannot be read
 * from user memory.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* Arguments are NUL-separated in memory; join them with spaces. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	/* real_parent is RCU-protected; _vnr = pid-namespace-relative IDs. */
	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* Map the lowest set state bit to a single-letter state code. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Credentials are RCU-managed; translate into the creds' user ns. */
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1504
Roland McGrath3aba4812008-01-30 13:31:44 +01001505static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1506{
1507 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1508 int i = 0;
1509 do
1510 i += 2;
1511 while (auxv[i - 2] != AT_NULL);
1512 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1513}
1514
/*
 * Build the NT_SIGINFO note from the siginfo of the fatal signal.
 * copy_siginfo_to_user() expects a user-space destination, so the
 * address limit is temporarily lifted with set_fs(KERNEL_DS) to let it
 * write into the kernel buffer @csigdata instead.
 */
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
1524
#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
/*
 * Build the NT_FILE note describing all file-backed mappings.
 * On success the note's data is a vmalloc'ed buffer owned by the note
 * (released later via vfree in free_note_info).  Returns 0 on success,
 * -EINVAL if the note would exceed MAX_FILE_NOTE_SIZE, or -ENOMEM.
 */
static int fill_files_note(struct memelfnote *note)
{
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = current->mm->map_count;
	size = count * 64;

	/* Filenames start right after the header and the triple array. */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = vmalloc(size);
	if (!data)
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				/* Buffer too small: grow by 25% and retry. */
				vfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than current->mm->map_count,
	 * we need to move filenames down.
	 */
	n = current->mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		/* Close the gap left by the over-estimated triple array. */
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1611
Roland McGrath4206d3a2008-01-30 13:31:45 +01001612#ifdef CORE_DUMP_USE_REGSET
1613#include <linux/regset.h>
1614
/*
 * Per-thread state collected by the regset-based dumper.  notes[0]
 * always holds NT_PRSTATUS, pointing into the embedded prstatus; the
 * remaining entries hold one note per additional regset in the view.
 */
struct elf_thread_core_info {
	struct elf_thread_core_info *next;	/* list; the dumped task is kept first */
	struct task_struct *task;		/* thread this record describes */
	struct elf_prstatus prstatus;		/* filled by fill_prstatus() */
	struct memelfnote notes[0];		/* thread_notes entries follow */
};
1621
/*
 * Everything fill_note_info() gathers for one core dump (regset flavour).
 */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* per-thread records, dumped task first */
	struct memelfnote psinfo;	/* NT_PRPSINFO */
	struct memelfnote signote;	/* NT_SIGINFO */
	struct memelfnote auxv;		/* NT_AUXV */
	struct memelfnote files;	/* NT_FILE; data == NULL when unavailable */
	user_siginfo_t csigdata;	/* backing store for signote */
	size_t size;			/* total payload of all notes */
	int thread_notes;		/* number of notes per thread */
};
1632
Roland McGrathd31472b2008-03-04 14:28:30 -08001633/*
1634 * When a regset has a writeback hook, we call it on each thread before
1635 * dumping user memory. On register window machines, this makes sure the
1636 * user memory backing the register data is up to date before we read it.
1637 */
1638static void do_thread_regset_writeback(struct task_struct *task,
1639 const struct user_regset *regset)
1640{
1641 if (regset->writeback)
1642 regset->writeback(task, regset, 1);
1643}
1644
#ifndef PRSTATUS_SIZE
/* Architectures may override to emit a differently sized prstatus note. */
#define PRSTATUS_SIZE(S, R) sizeof(S)
#endif

#ifndef SET_PR_FPVALID
/* Architectures may override how pr_fpvalid is recorded. */
#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
#endif
1652
/*
 * Collect all notes for one thread: NT_PRSTATUS from regset 0, then one
 * note per remaining active regset.  Adds each note's on-disk size to
 * *total.  Returns 1 on success, 0 on allocation failure.
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the reset in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset))) {
			int ret;
			size_t size = regset->n * regset->size;
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				/* NT_PRFPREG goes under "CORE" and marks pr_fpvalid. */
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
							1, regset_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
1713
/*
 * Collect all the non-memory information for the dump (regset flavour)
 * and initialise the ELF header.  Returns 1 on success, 0 on failure;
 * on failure the caller is expected to release partial allocations via
 * free_note_info().
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		/* Size includes the flexible notes[] trailer. */
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is best-effort: silently skipped if it cannot be built. */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1808
/* Total on-disk size of all notes accumulated by fill_note_info(). */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
1813
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		/* notes[0] is this thread's NT_PRSTATUS. */
		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		/* NT_FILE may be absent (data == NULL) if fill_files_note() failed. */
		if (first && info->files.data &&
				!writenote(&info->files, cprm))
			return 0;

		/* Remaining regset notes; all-zero entries were skipped. */
		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}
1851
/*
 * Release everything fill_note_info() allocated (regset flavour).
 * notes[0].data points into the embedded prstatus, so it must never be
 * freed separately -- hence the WARN_ON and the loop starting at 1.
 */
static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	/* NT_FILE data was vmalloc'ed by fill_files_note(). */
	vfree(info->files.data);
}
1867
1868#else
1869
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* linked into elf_note_info.thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* thread this record describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* filled by elf_dump_thread_status() */
	int num_notes;			/* how many of notes[] are valid */
};
1883
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 *
 * Returns the total on-disk size of the notes generated for thread @t.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* The FPU note is only emitted when the thread has FPU state. */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	/* Extended FP state, where the architecture provides it. */
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
1921
/*
 * Everything gathered for one core dump (non-regset fallback flavour).
 */
struct elf_note_info {
	struct memelfnote *notes;	/* array of up to 8 notes */
	struct memelfnote *notes_files;	/* points into notes[] when NT_FILE is present */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* struct elf_thread_status entries */
	elf_fpregset_t *fpu;		/* NT_PRFPREG buffer */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;		/* ELF_CORE_XFPREG_TYPE buffer */
#endif
	user_siginfo_t csigdata;	/* backing store for the NT_SIGINFO note */
	int thread_status_size;		/* total size of the thread status notes */
	int numnote;			/* notes[] entries actually used */
};
1936
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001937static int elf_note_info_init(struct elf_note_info *info)
Roland McGrath3aba4812008-01-30 13:31:44 +01001938{
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001939 memset(info, 0, sizeof(*info));
Roland McGrath3aba4812008-01-30 13:31:44 +01001940 INIT_LIST_HEAD(&info->thread_list);
1941
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001942 /* Allocate space for ELF notes */
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001943 info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
Roland McGrath3aba4812008-01-30 13:31:44 +01001944 if (!info->notes)
1945 return 0;
1946 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1947 if (!info->psinfo)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001948 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001949 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1950 if (!info->prstatus)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001951 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001952 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1953 if (!info->fpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001954 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001955#ifdef ELF_CORE_COPY_XFPREGS
1956 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
1957 if (!info->xfpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001958 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001959#endif
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001960 return 1;
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001961}
Roland McGrath3aba4812008-01-30 13:31:44 +01001962
/*
 * Collect all the non-memory information for the dump (non-regset
 * fallback) and initialise the ELF header.  Returns 1 on success,
 * 0 on failure; partial allocations are released by free_note_info()
 * on the caller's error path.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct list_head *t;
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/* Collect every other thread participating in the dump. */
	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	/* Dump each sibling thread's status and tally the note sizes. */
	list_for_each(t, &info->thread_list) {
		int sz;

		ets = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* NT_FILE is best-effort: silently skipped if it cannot be built. */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
2034
2035static size_t get_note_info_size(struct elf_note_info *info)
2036{
2037 int sz = 0;
2038 int i;
2039
2040 for (i = 0; i < info->numnote; i++)
2041 sz += notesize(info->notes + i);
2042
2043 sz += info->thread_status_size;
2044
2045 return sz;
2046}
2047
2048static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002049 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002050{
2051 int i;
2052 struct list_head *t;
2053
2054 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002055 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002056 return 0;
2057
2058 /* write out the thread status notes section */
2059 list_for_each(t, &info->thread_list) {
2060 struct elf_thread_status *tmp =
2061 list_entry(t, struct elf_thread_status, list);
2062
2063 for (i = 0; i < tmp->num_notes; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002064 if (!writenote(&tmp->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002065 return 0;
2066 }
2067
2068 return 1;
2069}
2070
2071static void free_note_info(struct elf_note_info *info)
2072{
2073 while (!list_empty(&info->thread_list)) {
2074 struct list_head *tmp = info->thread_list.next;
2075 list_del(tmp);
2076 kfree(list_entry(tmp, struct elf_thread_status, list));
2077 }
2078
Dan Aloni72023652013-09-30 13:45:02 -07002079 /* Free data possibly allocated by fill_files_note(): */
2080 if (info->notes_files)
2081 vfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002082
Roland McGrath3aba4812008-01-30 13:31:44 +01002083 kfree(info->prstatus);
2084 kfree(info->psinfo);
2085 kfree(info->notes);
2086 kfree(info->fpu);
2087#ifdef ELF_CORE_COPY_XFPREGS
2088 kfree(info->xfpu);
2089#endif
2090}
2091
Roland McGrath4206d3a2008-01-30 13:31:45 +01002092#endif
2093
Roland McGrathf47aef52007-01-26 00:56:49 -08002094static struct vm_area_struct *first_vma(struct task_struct *tsk,
2095 struct vm_area_struct *gate_vma)
2096{
2097 struct vm_area_struct *ret = tsk->mm->mmap;
2098
2099 if (ret)
2100 return ret;
2101 return gate_vma;
2102}
2103/*
2104 * Helper function for iterating across a vma list. It ensures that the caller
2105 * will visit `gate_vma' prior to terminating the search.
2106 */
2107static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2108 struct vm_area_struct *gate_vma)
2109{
2110 struct vm_area_struct *ret;
2111
2112 ret = this_vma->vm_next;
2113 if (ret)
2114 return ret;
2115 if (this_vma == gate_vma)
2116 return NULL;
2117 return gate_vma;
2118}
2119
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002120static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2121 elf_addr_t e_shoff, int segs)
2122{
2123 elf->e_shoff = e_shoff;
2124 elf->e_shentsize = sizeof(*shdr4extnum);
2125 elf->e_shnum = 1;
2126 elf->e_shstrndx = SHN_UNDEF;
2127
2128 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2129
2130 shdr4extnum->sh_type = SHT_NULL;
2131 shdr4extnum->sh_size = elf->e_shnum;
2132 shdr4extnum->sh_link = elf->e_shstrndx;
2133 shdr4extnum->sh_info = segs;
2134}
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136/*
2137 * Actual dumper
2138 *
2139 * This is a two-pass process; first we find the offsets of the bits,
2140 * and then they are actually written out. If we run out of core limit
2141 * we just truncate.
2142 */
Masami Hiramatsuf6151df2009-12-17 15:27:16 -08002143static int elf_core_dump(struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 int has_dumped = 0;
2146 mm_segment_t fs;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002147 int segs, i;
2148 size_t vma_data_size = 0;
Roland McGrathf47aef52007-01-26 00:56:49 -08002149 struct vm_area_struct *vma, *gate_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 struct elfhdr *elf = NULL;
Al Virocdc3d562013-10-05 22:24:29 -04002151 loff_t offset = 0, dataoff;
Dan Aloni72023652013-09-30 13:45:02 -07002152 struct elf_note_info info = { };
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002153 struct elf_phdr *phdr4note = NULL;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002154 struct elf_shdr *shdr4extnum = NULL;
2155 Elf_Half e_phnum;
2156 elf_addr_t e_shoff;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002157 elf_addr_t *vma_filesz = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
2159 /*
2160 * We no longer stop all VM operations.
2161 *
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002162 * This is because those proceses that could possibly change map_count
2163 * or the mmap / vma pages are now blocked in do_exit on current
2164 * finishing this core dump.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 *
2166 * Only ptrace can touch these memory addresses, but it doesn't change
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002167 * the map_count or the pages allocated. So no possibility of crashing
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 * exists while dumping the mm->vm_next areas to the core file.
2169 */
2170
2171 /* alloc memory for large data structures: too large to be on stack */
2172 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2173 if (!elf)
WANG Cong5f719552008-05-06 12:45:35 +08002174 goto out;
KAMEZAWA Hiroyuki341c87b2009-06-30 11:41:23 -07002175 /*
2176 * The number of segs are recored into ELF header as 16bit value.
2177 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
2178 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 segs = current->mm->map_count;
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002180 segs += elf_core_extra_phdrs();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
Stephen Wilson31db58b2011-03-13 15:49:15 -04002182 gate_vma = get_gate_vma(current->mm);
Roland McGrathf47aef52007-01-26 00:56:49 -08002183 if (gate_vma != NULL)
2184 segs++;
2185
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002186 /* for notes section */
2187 segs++;
2188
2189 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2190 * this, kernel supports extended numbering. Have a look at
2191 * include/linux/elf.h for further information. */
2192 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2193
Roland McGrath3aba4812008-01-30 13:31:44 +01002194 /*
2195 * Collect all the non-memory information about the process for the
2196 * notes. This also sets up the file header.
2197 */
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07002198 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
Roland McGrath3aba4812008-01-30 13:31:44 +01002199 goto cleanup;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
2201 has_dumped = 1;
Oleg Nesterov079148b2013-04-30 15:28:16 -07002202
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 fs = get_fs();
2204 set_fs(KERNEL_DS);
2205
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 offset += sizeof(*elf); /* Elf header */
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002207 offset += segs * sizeof(struct elf_phdr); /* Program headers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
2209 /* Write notes phdr entry */
2210 {
Roland McGrath3aba4812008-01-30 13:31:44 +01002211 size_t sz = get_note_info_size(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
Michael Ellermane5501492007-09-19 14:38:12 +10002213 sz += elf_coredump_extra_notes_size();
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002214
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002215 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2216 if (!phdr4note)
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002217 goto end_coredump;
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002218
2219 fill_elf_note_phdr(phdr4note, sz, offset);
2220 offset += sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 }
2222
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2224
Jason Baron30f74aa2016-12-12 16:46:40 -08002225 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
2226 goto end_coredump;
2227 vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002228 if (!vma_filesz)
2229 goto end_coredump;
2230
2231 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2232 vma = next_vma(vma, gate_vma)) {
2233 unsigned long dump_size;
2234
2235 dump_size = vma_dump_size(vma, cprm->mm_flags);
2236 vma_filesz[i++] = dump_size;
2237 vma_data_size += dump_size;
2238 }
2239
2240 offset += vma_data_size;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002241 offset += elf_core_extra_data_size();
2242 e_shoff = offset;
2243
2244 if (e_phnum == PN_XNUM) {
2245 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2246 if (!shdr4extnum)
2247 goto end_coredump;
2248 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2249 }
2250
2251 offset = dataoff;
2252
Al Viroecc8c772013-10-05 15:32:35 -04002253 if (!dump_emit(cprm, elf, sizeof(*elf)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002254 goto end_coredump;
2255
Al Viroecc8c772013-10-05 15:32:35 -04002256 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002257 goto end_coredump;
2258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 /* Write program headers for segments dump */
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002260 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002261 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 struct elf_phdr phdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
2264 phdr.p_type = PT_LOAD;
2265 phdr.p_offset = offset;
2266 phdr.p_vaddr = vma->vm_start;
2267 phdr.p_paddr = 0;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002268 phdr.p_filesz = vma_filesz[i++];
Roland McGrath82df3972007-10-16 23:27:02 -07002269 phdr.p_memsz = vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 offset += phdr.p_filesz;
2271 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002272 if (vma->vm_flags & VM_WRITE)
2273 phdr.p_flags |= PF_W;
2274 if (vma->vm_flags & VM_EXEC)
2275 phdr.p_flags |= PF_X;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 phdr.p_align = ELF_EXEC_PAGESIZE;
2277
Al Viroecc8c772013-10-05 15:32:35 -04002278 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002279 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 }
2281
Al Viro506f21c2013-10-05 17:22:57 -04002282 if (!elf_core_write_extra_phdrs(cprm, offset))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002283 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
2285 /* write out the notes section */
Al Viroecc8c772013-10-05 15:32:35 -04002286 if (!write_note_info(&info, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002287 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Al Virocdc3d562013-10-05 22:24:29 -04002289 if (elf_coredump_extra_notes_write(cprm))
Michael Ellermane5501492007-09-19 14:38:12 +10002290 goto end_coredump;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002291
Andi Kleend025c9d2006-09-30 23:29:28 -07002292 /* Align to page */
Mateusz Guzik1607f092016-06-05 23:14:14 +02002293 if (!dump_skip(cprm, dataoff - cprm->pos))
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002294 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002296 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002297 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 unsigned long addr;
Roland McGrath82df3972007-10-16 23:27:02 -07002299 unsigned long end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002301 end = vma->vm_start + vma_filesz[i++];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Roland McGrath82df3972007-10-16 23:27:02 -07002303 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002304 struct page *page;
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002305 int stop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002307 page = get_dump_page(addr);
2308 if (page) {
2309 void *kaddr = kmap(page);
Al Viro13046ec2013-10-05 18:08:47 -04002310 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002311 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002312 put_page(page);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002313 } else
Al Viro9b56d542013-10-08 09:26:08 -04002314 stop = !dump_skip(cprm, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002315 if (stop)
2316 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 }
2318 }
Dave Kleikamp4d22c752017-01-11 13:25:00 -06002319 dump_truncate(cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
Al Viroaa3e7ea2013-10-05 17:50:15 -04002321 if (!elf_core_write_extra_data(cprm))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002322 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002324 if (e_phnum == PN_XNUM) {
Al Viro13046ec2013-10-05 18:08:47 -04002325 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002326 goto end_coredump;
2327 }
2328
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329end_coredump:
2330 set_fs(fs);
2331
2332cleanup:
Roland McGrath3aba4812008-01-30 13:31:44 +01002333 free_note_info(&info);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002334 kfree(shdr4extnum);
Jason Baron30f74aa2016-12-12 16:46:40 -08002335 vfree(vma_filesz);
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002336 kfree(phdr4note);
WANG Cong5f719552008-05-06 12:45:35 +08002337 kfree(elf);
2338out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 return has_dumped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340}
2341
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002342#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
2344static int __init init_elf_binfmt(void)
2345{
Al Viro8fc3dc52012-03-17 03:05:16 -04002346 register_binfmt(&elf_format);
2347 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348}
2349
2350static void __exit exit_elf_binfmt(void)
2351{
2352 /* Remove the COFF and ELF loaders. */
2353 unregister_binfmt(&elf_format);
2354}
2355
/*
 * Register the ELF binfmt early in boot (core_initcall) so ELF binaries
 * can be executed as soon as userspace starts; pair with the exit hook
 * for module unload.
 */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");