blob: 443a6f537d569f359c944bb689ce29fd38f06942 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/mm.h>
16#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/binfmts.h>
20#include <linux/string.h>
21#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/personality.h>
24#include <linux/elfcore.h>
25#include <linux/init.h>
26#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/compiler.h>
28#include <linux/highmem.h>
29#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070030#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070033#include <linux/elf.h>
Kees Cookd1fd8362015-04-14 15:48:07 -070034#include <linux/elf-randomize.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070035#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080036#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010037#include <linux/sched.h>
Ross Zwisler50378352015-10-05 16:33:36 -060038#include <linux/dax.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080039#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <asm/param.h>
41#include <asm/page.h>
42
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070043#ifndef user_long_t
44#define user_long_t long
45#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070046#ifndef user_siginfo_t
47#define user_siginfo_t siginfo_t
48#endif
49
Al Viro71613c32012-10-20 22:00:48 -040050static int load_elf_binary(struct linux_binprm *bprm);
Andrew Mortonbb1ad822008-01-30 13:31:07 +010051static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
52 int, int, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Josh Triplett69369a72014-04-03 14:48:27 -070054#ifdef CONFIG_USELIB
55static int load_elf_library(struct file *);
56#else
57#define load_elf_library NULL
58#endif
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/*
61 * If we don't support core dumping, then supply a NULL so we
62 * don't even try.
63 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080064#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080065static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#else
67#define elf_core_dump NULL
68#endif
69
70#if ELF_EXEC_PAGESIZE > PAGE_SIZE
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070071#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070072#else
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070073#define ELF_MIN_ALIGN PAGE_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070074#endif
75
76#ifndef ELF_CORE_EFLAGS
77#define ELF_CORE_EFLAGS 0
78#endif
79
80#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
81#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
82#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
83
/*
 * Registration record handed to the binfmt core: entry points for
 * loading ELF executables, ELF shared libraries, and writing ELF
 * core dumps (the latter two may be compiled out to NULL above).
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
91
Andrew Mortond4e3cc32007-07-21 04:37:32 -070092#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -070093
Denys Vlasenko16e72e92017-02-22 15:45:16 -080094static int set_brk(unsigned long start, unsigned long end, int prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
96 start = ELF_PAGEALIGN(start);
97 end = ELF_PAGEALIGN(end);
98 if (end > start) {
Denys Vlasenko16e72e92017-02-22 15:45:16 -080099 /*
100 * Map the last of the bss segment.
101 * If the header is requesting these pages to be
102 * executable, honour that (ppc32 needs this).
103 */
104 int error = vm_brk_flags(start, end - start,
105 prot & PROT_EXEC ? VM_EXEC : 0);
Linus Torvalds5d22fc22016-05-27 15:57:31 -0700106 if (error)
107 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 }
109 current->mm->start_brk = current->mm->brk = end;
110 return 0;
111}
112
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113/* We need to explicitly zero any fractional pages
114 after the data section (i.e. bss). This would
115 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700116 be in memory
117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118static int padzero(unsigned long elf_bss)
119{
120 unsigned long nbyte;
121
122 nbyte = ELF_PAGEOFFSET(elf_bss);
123 if (nbyte) {
124 nbyte = ELF_MIN_ALIGN - nbyte;
125 if (clear_user((void __user *) elf_bss, nbyte))
126 return -EFAULT;
127 }
128 return 0;
129}
130
Ohad Ben-Cohen09c6dd32008-02-03 18:05:15 +0200131/* Let's use some macros to make this stack manipulation a little clearer */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132#ifdef CONFIG_STACK_GROWSUP
133#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
134#define STACK_ROUND(sp, items) \
135 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700136#define STACK_ALLOC(sp, len) ({ \
137 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
138 old_sp; })
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139#else
140#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
141#define STACK_ROUND(sp, items) \
142 (((unsigned long) (sp - items)) &~ 15UL)
143#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
144#endif
145
Nathan Lynch483fad12008-07-22 04:48:46 +1000146#ifndef ELF_BASE_PLATFORM
147/*
148 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
149 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
150 * will be copied to the user stack in the same manner as AT_PLATFORM.
151 */
152#define ELF_BASE_PLATFORM NULL
153#endif
154
/*
 * Build the initial userspace stack image for a freshly exec'ed ELF
 * process: optional platform strings, 16 AT_RANDOM seed bytes, the
 * auxiliary vector, then argc, the argv[] and envp[] pointer arrays
 * (pointing into the strings copied earlier by the binfmt core), and
 * finally the auxv itself.  Returns 0 on success or a negative errno.
 */
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/*
	 * Create the ELF interpreter info.  The auxv is first assembled
	 * in kernel memory (mm->saved_auxv) and copied to the user stack
	 * at the end of this function.
	 */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	/* IDs are translated into the user namespace of the new process */
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	/* argc slot + argv pointers + NULL + envp pointers + NULL */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	argv = sp;
	envp = argv + argc + 1;

	/*
	 * Populate argv and envp: walk the strings already copied to the
	 * stack (starting at arg_start) and record a pointer to each one.
	 */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
339
James Hoganc07380b2011-05-09 10:58:40 +0100340#ifndef elf_map
341
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342static unsigned long elf_map(struct file *filep, unsigned long addr,
Jiri Kosinacc503c12008-01-30 13:31:07 +0100343 struct elf_phdr *eppnt, int prot, int type,
344 unsigned long total_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345{
346 unsigned long map_addr;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100347 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
348 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
349 addr = ELF_PAGESTART(addr);
350 size = ELF_PAGEALIGN(size);
Jan Kratochvil60bfba72007-07-15 23:40:06 -0700351
Andrew Mortond4e3cc32007-07-21 04:37:32 -0700352 /* mmap() will return -EINVAL if given a zero size, but a
353 * segment with zero filesize is perfectly valid */
Jiri Kosinacc503c12008-01-30 13:31:07 +0100354 if (!size)
355 return addr;
356
Jiri Kosinacc503c12008-01-30 13:31:07 +0100357 /*
358 * total_size is the size of the ELF (interpreter) image.
359 * The _first_ mmap needs to know the full size, otherwise
360 * randomization might put this image into an overlapping
361 * position with the ELF binary image. (since size < total_size)
362 * So we first map the 'big' image - and unmap the remainder at
363 * the end. (which unmap is needed for ELF images with holes.)
364 */
365 if (total_size) {
366 total_size = ELF_PAGEALIGN(total_size);
Al Viro5a5e4c22012-05-30 01:49:38 -0400367 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100368 if (!BAD_ADDR(map_addr))
Al Viro5a5e4c22012-05-30 01:49:38 -0400369 vm_munmap(map_addr+size, total_size-size);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100370 } else
Al Viro5a5e4c22012-05-30 01:49:38 -0400371 map_addr = vm_mmap(filep, addr, size, prot, type, off);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100372
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373 return(map_addr);
374}
375
James Hoganc07380b2011-05-09 10:58:40 +0100376#endif /* !elf_map */
377
Jiri Kosinacc503c12008-01-30 13:31:07 +0100378static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
379{
380 int i, first_idx = -1, last_idx = -1;
381
382 for (i = 0; i < nr; i++) {
383 if (cmds[i].p_type == PT_LOAD) {
384 last_idx = i;
385 if (first_idx == -1)
386 first_idx = i;
387 }
388 }
389 if (first_idx == -1)
390 return 0;
391
392 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
393 ELF_PAGESTART(cmds[first_idx].p_vaddr);
394}
395
Paul Burton6a8d3892014-09-11 08:30:14 +0100396/**
397 * load_elf_phdrs() - load ELF program headers
398 * @elf_ex: ELF header of the binary whose program headers should be loaded
399 * @elf_file: the opened ELF binary file
400 *
401 * Loads ELF program headers from the binary file elf_file, which has the ELF
402 * header pointed to by elf_ex, into a newly allocated array. The caller is
403 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
404 */
405static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
406 struct file *elf_file)
407{
408 struct elf_phdr *elf_phdata = NULL;
409 int retval, size, err = -1;
410
411 /*
412 * If the size of this structure has changed, then punt, since
413 * we will be doing the wrong thing.
414 */
415 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
416 goto out;
417
418 /* Sanity check the number of program headers... */
419 if (elf_ex->e_phnum < 1 ||
420 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
421 goto out;
422
423 /* ...and their total size. */
424 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
425 if (size > ELF_MIN_ALIGN)
426 goto out;
427
428 elf_phdata = kmalloc(size, GFP_KERNEL);
429 if (!elf_phdata)
430 goto out;
431
432 /* Read in the program headers */
433 retval = kernel_read(elf_file, elf_ex->e_phoff,
434 (char *)elf_phdata, size);
435 if (retval != size) {
436 err = (retval < 0) ? retval : -EIO;
437 goto out;
438 }
439
440 /* Success! */
441 err = 0;
442out:
443 if (err) {
444 kfree(elf_phdata);
445 elf_phdata = NULL;
446 }
447 return elf_phdata;
448}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100449
Paul Burton774c1052014-09-11 08:30:16 +0100450#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
451
452/**
453 * struct arch_elf_state - arch-specific ELF loading state
454 *
455 * This structure is used to preserve architecture specific data during
456 * the loading of an ELF file, throughout the checking of architecture
457 * specific ELF headers & through to the point where the ELF load is
458 * known to be proceeding (ie. SET_PERSONALITY).
459 *
460 * This implementation is a dummy for architectures which require no
461 * specific state.
462 */
463struct arch_elf_state {
464};
465
466#define INIT_ARCH_ELF_STATE {}
467
468/**
469 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
470 * @ehdr: The main ELF header
471 * @phdr: The program header to check
472 * @elf: The open ELF file
473 * @is_interp: True if the phdr is from the interpreter of the ELF being
474 * loaded, else false.
475 * @state: Architecture-specific state preserved throughout the process
476 * of loading the ELF.
477 *
478 * Inspects the program header phdr to validate its correctness and/or
479 * suitability for the system. Called once per ELF program header in the
480 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
481 * interpreter.
482 *
483 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
484 * with that return code.
485 */
486static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
487 struct elf_phdr *phdr,
488 struct file *elf, bool is_interp,
489 struct arch_elf_state *state)
490{
491 /* Dummy implementation, always proceed */
492 return 0;
493}
494
495/**
Maciej W. Rozycki54d157142015-10-26 15:47:57 +0000496 * arch_check_elf() - check an ELF executable
Paul Burton774c1052014-09-11 08:30:16 +0100497 * @ehdr: The main ELF header
498 * @has_interp: True if the ELF has an interpreter, else false.
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000499 * @interp_ehdr: The interpreter's ELF header
Paul Burton774c1052014-09-11 08:30:16 +0100500 * @state: Architecture-specific state preserved throughout the process
501 * of loading the ELF.
502 *
503 * Provides a final opportunity for architecture code to reject the loading
504 * of the ELF & cause an exec syscall to return an error. This is called after
505 * all program headers to be checked by arch_elf_pt_proc have been.
506 *
507 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
508 * with that return code.
509 */
510static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000511 struct elfhdr *interp_ehdr,
Paul Burton774c1052014-09-11 08:30:16 +0100512 struct arch_elf_state *state)
513{
514 /* Dummy implementation, always proceed */
515 return 0;
516}
517
518#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

/*
 * Map the ELF interpreter (e.g. the dynamic linker) into the new
 * process's address space and allocate its bss.  On success the return
 * value is the load bias (load_addr); on failure it is an address for
 * which BAD_ADDR() is true or a negative errno cast to unsigned long.
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			/* Translate PF_* segment flags into mmap PROT_* bits */
			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			/*
			 * Only the first mapping passes total_size, so the
			 * whole image span is reserved at once (see
			 * elf_map()); subsequent segments map normally.
			 */
			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			/* First ET_DYN segment fixes the load bias */
			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				/* remember prot of the segment whose bss wins */
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
651
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652/*
653 * These are the functions used to load ELF style executables and shared
654 * libraries. There is no binary dependent code anywhere else.
655 */
656
Andi Kleen913bd902006-03-25 16:29:09 +0100657#ifndef STACK_RND_MASK
James Bottomleyd1cabd62007-03-16 13:38:35 -0800658#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
Andi Kleen913bd902006-03-25 16:29:09 +0100659#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660
661static unsigned long randomize_stack_top(unsigned long stack_top)
662{
Hector Marco-Gisbert4e7c22d2015-02-14 09:33:50 -0800663 unsigned long random_variable = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664
Andi Kleenc16b63e02006-09-26 10:52:28 +0200665 if ((current->flags & PF_RANDOMIZE) &&
666 !(current->personality & ADDR_NO_RANDOMIZE)) {
Daniel Cashman5ef11c32016-02-26 15:19:37 -0800667 random_variable = get_random_long();
Hector Marco-Gisbert4e7c22d2015-02-14 09:33:50 -0800668 random_variable &= STACK_RND_MASK;
Andi Kleen913bd902006-03-25 16:29:09 +0100669 random_variable <<= PAGE_SHIFT;
670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671#ifdef CONFIG_STACK_GROWSUP
Andi Kleen913bd902006-03-25 16:29:09 +0100672 return PAGE_ALIGN(stack_top) + random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673#else
Andi Kleen913bd902006-03-25 16:29:09 +0100674 return PAGE_ALIGN(stack_top) - random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675#endif
676}
677
Al Viro71613c32012-10-20 22:00:48 -0400678static int load_elf_binary(struct linux_binprm *bprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679{
680 struct file *interpreter = NULL; /* to shut gcc up */
681 unsigned long load_addr = 0, load_bias = 0;
682 int load_addr_set = 0;
683 char * elf_interpreter = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 unsigned long error;
Paul Burtona9d9ef12014-09-11 08:30:15 +0100685 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 unsigned long elf_bss, elf_brk;
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800687 int bss_prot = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 int retval, i;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100689 unsigned long elf_entry;
690 unsigned long interp_load_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 unsigned long start_code, end_code, start_data, end_data;
David Daney1a530a62011-03-22 16:34:48 -0700692 unsigned long reloc_func_desc __maybe_unused = 0;
David Rientjes8de61e62006-12-06 20:40:16 -0800693 int executable_stack = EXSTACK_DEFAULT;
Al Viro71613c32012-10-20 22:00:48 -0400694 struct pt_regs *regs = current_pt_regs();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 struct {
696 struct elfhdr elf_ex;
697 struct elfhdr interp_elf_ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 } *loc;
Paul Burton774c1052014-09-11 08:30:16 +0100699 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700
701 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
702 if (!loc) {
703 retval = -ENOMEM;
704 goto out_ret;
705 }
706
707 /* Get the exec-header */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700708 loc->elf_ex = *((struct elfhdr *)bprm->buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710 retval = -ENOEXEC;
711 /* First of all, some simple consistency checks */
712 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
713 goto out;
714
715 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
716 goto out;
717 if (!elf_check_arch(&loc->elf_ex))
718 goto out;
Al Viro72c2d532013-09-22 16:27:52 -0400719 if (!bprm->file->f_op->mmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 goto out;
721
Paul Burton6a8d3892014-09-11 08:30:14 +0100722 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 if (!elf_phdata)
724 goto out;
725
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726 elf_ppnt = elf_phdata;
727 elf_bss = 0;
728 elf_brk = 0;
729
730 start_code = ~0UL;
731 end_code = 0;
732 start_data = 0;
733 end_data = 0;
734
735 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
736 if (elf_ppnt->p_type == PT_INTERP) {
737 /* This is the program interpreter used for
738 * shared libraries - for now assume that this
739 * is an a.out format binary
740 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 retval = -ENOEXEC;
742 if (elf_ppnt->p_filesz > PATH_MAX ||
743 elf_ppnt->p_filesz < 2)
Al Viroe7b9b552009-03-29 16:31:16 -0400744 goto out_free_ph;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745
746 retval = -ENOMEM;
Jesper Juhl792db3a2006-01-09 20:54:45 -0800747 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700748 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 if (!elf_interpreter)
Al Viroe7b9b552009-03-29 16:31:16 -0400750 goto out_free_ph;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751
752 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700753 elf_interpreter,
754 elf_ppnt->p_filesz);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 if (retval != elf_ppnt->p_filesz) {
756 if (retval >= 0)
757 retval = -EIO;
758 goto out_free_interp;
759 }
760 /* make sure path is NULL terminated */
761 retval = -ENOEXEC;
762 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
763 goto out_free_interp;
764
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 interpreter = open_exec(elf_interpreter);
766 retval = PTR_ERR(interpreter);
767 if (IS_ERR(interpreter))
768 goto out_free_interp;
Alexey Dobriyan1fb84492007-01-26 00:57:16 -0800769
770 /*
771 * If the binary is not readable then enforce
772 * mm->dumpable = 0 regardless of the interpreter's
773 * permissions.
774 */
Al Viro1b5d7832011-06-19 12:49:47 -0400775 would_dump(bprm, interpreter);
Alexey Dobriyan1fb84492007-01-26 00:57:16 -0800776
Maciej W. Rozyckib582ef5c2015-10-26 15:48:19 +0000777 /* Get the exec headers */
778 retval = kernel_read(interpreter, 0,
779 (void *)&loc->interp_elf_ex,
780 sizeof(loc->interp_elf_ex));
781 if (retval != sizeof(loc->interp_elf_ex)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 if (retval >= 0)
783 retval = -EIO;
784 goto out_free_dentry;
785 }
786
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 break;
788 }
789 elf_ppnt++;
790 }
791
792 elf_ppnt = elf_phdata;
793 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
Paul Burton774c1052014-09-11 08:30:16 +0100794 switch (elf_ppnt->p_type) {
795 case PT_GNU_STACK:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 if (elf_ppnt->p_flags & PF_X)
797 executable_stack = EXSTACK_ENABLE_X;
798 else
799 executable_stack = EXSTACK_DISABLE_X;
800 break;
Paul Burton774c1052014-09-11 08:30:16 +0100801
802 case PT_LOPROC ... PT_HIPROC:
803 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
804 bprm->file, false,
805 &arch_state);
806 if (retval)
807 goto out_free_dentry;
808 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810
811 /* Some simple consistency checks for the interpreter */
812 if (elf_interpreter) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 retval = -ELIBBAD;
Andi Kleend20894a2008-02-08 04:21:54 -0800814 /* Not an ELF interpreter */
815 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 goto out_free_dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 /* Verify the interpreter has a valid arch */
Andi Kleend20894a2008-02-08 04:21:54 -0800818 if (!elf_check_arch(&loc->interp_elf_ex))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 goto out_free_dentry;
Paul Burtona9d9ef12014-09-11 08:30:15 +0100820
821 /* Load the interpreter program headers */
822 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
823 interpreter);
824 if (!interp_elf_phdata)
825 goto out_free_dentry;
Paul Burton774c1052014-09-11 08:30:16 +0100826
827 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
828 elf_ppnt = interp_elf_phdata;
829 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
830 switch (elf_ppnt->p_type) {
831 case PT_LOPROC ... PT_HIPROC:
832 retval = arch_elf_pt_proc(&loc->interp_elf_ex,
833 elf_ppnt, interpreter,
834 true, &arch_state);
835 if (retval)
836 goto out_free_dentry;
837 break;
838 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839 }
840
Paul Burton774c1052014-09-11 08:30:16 +0100841 /*
842 * Allow arch code to reject the ELF at this point, whilst it's
843 * still possible to return an error to the code that invoked
844 * the exec syscall.
845 */
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000846 retval = arch_check_elf(&loc->elf_ex,
847 !!interpreter, &loc->interp_elf_ex,
848 &arch_state);
Paul Burton774c1052014-09-11 08:30:16 +0100849 if (retval)
850 goto out_free_dentry;
851
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 /* Flush all traces of the currently running executable */
853 retval = flush_old_exec(bprm);
854 if (retval)
855 goto out_free_dentry;
856
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
858 may depend on the personality. */
Paul Burton774c1052014-09-11 08:30:16 +0100859 SET_PERSONALITY2(loc->elf_ex, &arch_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
861 current->personality |= READ_IMPLIES_EXEC;
862
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700863 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 current->flags |= PF_RANDOMIZE;
Linus Torvalds221af7f2010-01-28 22:14:42 -0800865
866 setup_new_exec(bprm);
Linus Torvalds9f834ec2016-08-22 16:41:46 -0700867 install_exec_creds(bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868
869 /* Do this so that we can load the interpreter, if need be. We will
870 change some of these later */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
872 executable_stack);
Al Viro19d860a2014-05-04 20:11:36 -0400873 if (retval < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 goto out_free_dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 current->mm->start_stack = bprm->p;
877
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200878 /* Now we do a little grungy work by mmapping the ELF image into
Jiri Kosinacc503c12008-01-30 13:31:07 +0100879 the correct location in memory. */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700880 for(i = 0, elf_ppnt = elf_phdata;
881 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882 int elf_prot = 0, elf_flags;
883 unsigned long k, vaddr;
Michael Davidsona87938b2015-04-14 15:47:38 -0700884 unsigned long total_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885
886 if (elf_ppnt->p_type != PT_LOAD)
887 continue;
888
889 if (unlikely (elf_brk > elf_bss)) {
890 unsigned long nbyte;
891
892 /* There was a PT_LOAD segment with p_memsz > p_filesz
893 before this one. Map anonymous pages, if needed,
894 and clear the area. */
Mikael Petterssonf670d0e2011-01-12 17:00:02 -0800895 retval = set_brk(elf_bss + load_bias,
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800896 elf_brk + load_bias,
897 bss_prot);
Al Viro19d860a2014-05-04 20:11:36 -0400898 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 goto out_free_dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 nbyte = ELF_PAGEOFFSET(elf_bss);
901 if (nbyte) {
902 nbyte = ELF_MIN_ALIGN - nbyte;
903 if (nbyte > elf_brk - elf_bss)
904 nbyte = elf_brk - elf_bss;
905 if (clear_user((void __user *)elf_bss +
906 load_bias, nbyte)) {
907 /*
908 * This bss-zeroing can fail if the ELF
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700909 * file specifies odd protections. So
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 * we don't check the return value
911 */
912 }
913 }
914 }
915
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700916 if (elf_ppnt->p_flags & PF_R)
917 elf_prot |= PROT_READ;
918 if (elf_ppnt->p_flags & PF_W)
919 elf_prot |= PROT_WRITE;
920 if (elf_ppnt->p_flags & PF_X)
921 elf_prot |= PROT_EXEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700923 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
925 vaddr = elf_ppnt->p_vaddr;
926 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
927 elf_flags |= MAP_FIXED;
928 } else if (loc->elf_ex.e_type == ET_DYN) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700929 /* Try and get dynamic programs out of the way of the
930 * default mmap base, as well as whatever program they
931 * might try to exec. This is because the brk will
932 * follow the loader, and is not movable. */
Kees Cookd1fd8362015-04-14 15:48:07 -0700933 load_bias = ELF_ET_DYN_BASE - vaddr;
Jiri Kosinaa3defbe2011-11-02 13:37:41 -0700934 if (current->flags & PF_RANDOMIZE)
Kees Cookd1fd8362015-04-14 15:48:07 -0700935 load_bias += arch_mmap_rnd();
936 load_bias = ELF_PAGESTART(load_bias);
Michael Davidsona87938b2015-04-14 15:47:38 -0700937 total_size = total_mapping_size(elf_phdata,
938 loc->elf_ex.e_phnum);
939 if (!total_size) {
Andrew Morton2b1d3ae2015-05-28 15:44:24 -0700940 retval = -EINVAL;
Michael Davidsona87938b2015-04-14 15:47:38 -0700941 goto out_free_dentry;
942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 }
944
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700945 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
Michael Davidsona87938b2015-04-14 15:47:38 -0700946 elf_prot, elf_flags, total_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 if (BAD_ADDR(error)) {
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -0700948 retval = IS_ERR((void *)error) ?
949 PTR_ERR((void*)error) : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 goto out_free_dentry;
951 }
952
953 if (!load_addr_set) {
954 load_addr_set = 1;
955 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
956 if (loc->elf_ex.e_type == ET_DYN) {
957 load_bias += error -
958 ELF_PAGESTART(load_bias + vaddr);
959 load_addr += load_bias;
960 reloc_func_desc = load_bias;
961 }
962 }
963 k = elf_ppnt->p_vaddr;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700964 if (k < start_code)
965 start_code = k;
966 if (start_data < k)
967 start_data = k;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968
969 /*
970 * Check to see if the section's size will overflow the
971 * allowed task size. Note that p_filesz must always be
972 * <= p_memsz so it is only necessary to check p_memsz.
973 */
Chuck Ebbertce510592006-07-03 00:24:14 -0700974 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 elf_ppnt->p_memsz > TASK_SIZE ||
976 TASK_SIZE - elf_ppnt->p_memsz < k) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700977 /* set_brk can never work. Avoid overflows. */
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -0700978 retval = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 goto out_free_dentry;
980 }
981
982 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
983
984 if (k > elf_bss)
985 elf_bss = k;
986 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
987 end_code = k;
988 if (end_data < k)
989 end_data = k;
990 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800991 if (k > elf_brk) {
992 bss_prot = elf_prot;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 elf_brk = k;
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800994 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 }
996
997 loc->elf_ex.e_entry += load_bias;
998 elf_bss += load_bias;
999 elf_brk += load_bias;
1000 start_code += load_bias;
1001 end_code += load_bias;
1002 start_data += load_bias;
1003 end_data += load_bias;
1004
1005 /* Calling set_brk effectively mmaps the pages that we need
1006 * for the bss and break sections. We must do this before
1007 * mapping in the interpreter, to make sure it doesn't wind
1008 * up getting placed where the bss needs to go.
1009 */
Denys Vlasenko16e72e92017-02-22 15:45:16 -08001010 retval = set_brk(elf_bss, elf_brk, bss_prot);
Al Viro19d860a2014-05-04 20:11:36 -04001011 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 goto out_free_dentry;
akpm@osdl.org6de50512005-10-11 08:29:08 -07001013 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 retval = -EFAULT; /* Nobody gets to see this, but.. */
1015 goto out_free_dentry;
1016 }
1017
1018 if (elf_interpreter) {
Alan Cox6eec4822012-10-04 17:13:42 -07001019 unsigned long interp_map_addr = 0;
Jiri Kosinacc503c12008-01-30 13:31:07 +01001020
Andi Kleend20894a2008-02-08 04:21:54 -08001021 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1022 interpreter,
1023 &interp_map_addr,
Paul Burtona9d9ef12014-09-11 08:30:15 +01001024 load_bias, interp_elf_phdata);
Andi Kleend20894a2008-02-08 04:21:54 -08001025 if (!IS_ERR((void *)elf_entry)) {
1026 /*
1027 * load_elf_interp() returns relocation
1028 * adjustment
1029 */
1030 interp_load_addr = elf_entry;
1031 elf_entry += loc->interp_elf_ex.e_entry;
Jiri Kosinacc503c12008-01-30 13:31:07 +01001032 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 if (BAD_ADDR(elf_entry)) {
Chuck Ebbertce510592006-07-03 00:24:14 -07001034 retval = IS_ERR((void *)elf_entry) ?
1035 (int)elf_entry : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036 goto out_free_dentry;
1037 }
1038 reloc_func_desc = interp_load_addr;
1039
1040 allow_write_access(interpreter);
1041 fput(interpreter);
1042 kfree(elf_interpreter);
1043 } else {
1044 elf_entry = loc->elf_ex.e_entry;
Suresh Siddha5342fba2006-02-26 04:18:28 +01001045 if (BAD_ADDR(elf_entry)) {
Chuck Ebbertce510592006-07-03 00:24:14 -07001046 retval = -EINVAL;
Suresh Siddha5342fba2006-02-26 04:18:28 +01001047 goto out_free_dentry;
1048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049 }
1050
Paul Burton774c1052014-09-11 08:30:16 +01001051 kfree(interp_elf_phdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 kfree(elf_phdata);
1053
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 set_binfmt(&elf_format);
1055
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -07001056#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
Martin Schwidefskyfc5243d2008-12-25 13:38:35 +01001057 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
Al Viro19d860a2014-05-04 20:11:36 -04001058 if (retval < 0)
Roland McGrath18c8baf2005-04-28 15:17:19 -07001059 goto out;
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -07001060#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1061
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001062 retval = create_elf_tables(bprm, &loc->elf_ex,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001063 load_addr, interp_load_addr);
Al Viro19d860a2014-05-04 20:11:36 -04001064 if (retval < 0)
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001065 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 /* N.B. passed_fileno might not be initialized? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 current->mm->end_code = end_code;
1068 current->mm->start_code = start_code;
1069 current->mm->start_data = start_data;
1070 current->mm->end_data = end_data;
1071 current->mm->start_stack = bprm->p;
1072
Jiri Kosina4471a672011-04-14 15:22:09 -07001073 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
Jiri Kosinac1d171a2008-01-30 13:30:40 +01001074 current->mm->brk = current->mm->start_brk =
1075 arch_randomize_brk(current->mm);
Kees Cook204db6e2015-04-14 15:48:12 -07001076#ifdef compat_brk_randomized
Jiri Kosina4471a672011-04-14 15:22:09 -07001077 current->brk_randomized = 1;
1078#endif
1079 }
Jiri Kosinac1d171a2008-01-30 13:30:40 +01001080
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 if (current->personality & MMAP_PAGE_ZERO) {
1082 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1083 and some applications "depend" upon this behavior.
1084 Since we do not have the power to recompile these, we
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001085 emulate the SVr4 behavior. Sigh. */
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001086 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 MAP_FIXED | MAP_PRIVATE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 }
1089
1090#ifdef ELF_PLAT_INIT
1091 /*
1092 * The ABI may specify that certain registers be set up in special
1093 * ways (on i386 %edx is the address of a DT_FINI function, for
1094 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1095 * that the e_entry field is the address of the function descriptor
1096 * for the startup routine, rather than the address of the startup
1097 * routine itself. This macro performs whatever initialization to
1098 * the regs structure is required as well as any relocations to the
1099 * function descriptor entries when executing dynamically links apps.
1100 */
1101 ELF_PLAT_INIT(regs, reloc_func_desc);
1102#endif
1103
1104 start_thread(regs, elf_entry, bprm->p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 retval = 0;
1106out:
1107 kfree(loc);
1108out_ret:
1109 return retval;
1110
1111 /* error cleanup */
1112out_free_dentry:
Paul Burtona9d9ef12014-09-11 08:30:15 +01001113 kfree(interp_elf_phdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 allow_write_access(interpreter);
1115 if (interpreter)
1116 fput(interpreter);
1117out_free_interp:
Jesper Juhlf99d49a2005-11-07 01:01:34 -08001118 kfree(elf_interpreter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119out_free_ph:
1120 kfree(elf_phdata);
1121 goto out;
1122}
1123
#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header.
   Maps the single PT_LOAD segment of the library with uselib(2)
   semantics: fixed address, RWX, plus an anonymous brk area for any
   p_memsz > p_filesz tail.  Returns 0 on success or a negative errno. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	/* A library must contain exactly one PT_LOAD segment. */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the partial page after the file-backed data (bss start). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001208#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216/*
Jason Baron909af762012-03-23 15:02:51 -07001217 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1218 * that are useful for post-mortem analysis are included in every core dump.
1219 * In that way we ensure that the core dump is fully interpretable later
1220 * without matching up the same kernel and hardware config to see what PC values
1221 * meant. These special mappings include - vDSO, vsyscall, and other
1222 * architecture specific mappings
1223 */
1224static bool always_dump_vma(struct vm_area_struct *vma)
1225{
1226 /* Any vsyscall mappings? */
1227 if (vma == get_gate_vma(vma->vm_mm))
1228 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001229
1230 /*
1231 * Assume that all vmas with a .name op should always be dumped.
1232 * If this changes, a new vm_ops field can easily be added.
1233 */
1234 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1235 return true;
1236
Jason Baron909af762012-03-23 15:02:51 -07001237 /*
1238 * arch_vma_name() returns non-NULL for special architecture mappings,
1239 * such as vDSO sections.
1240 */
1241 if (arch_vma_name(vma))
1242 return true;
1243
1244 return false;
1245}
1246
1247/*
Roland McGrath82df3972007-10-16 23:27:02 -07001248 * Decide what to dump of a segment, part, all or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 */
Roland McGrath82df3972007-10-16 23:27:02 -07001250static unsigned long vma_dump_size(struct vm_area_struct *vma,
1251 unsigned long mm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001253#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1254
Jason Baron909af762012-03-23 15:02:51 -07001255 /* always dump the vdso and vsyscall sections */
1256 if (always_dump_vma(vma))
Roland McGrath82df3972007-10-16 23:27:02 -07001257 goto whole;
Roland McGrathe5b97dd2007-01-26 00:56:48 -08001258
Konstantin Khlebnikov0103bd12012-10-08 16:28:59 -07001259 if (vma->vm_flags & VM_DONTDUMP)
Jason Baronaccb61f2012-03-23 15:02:51 -07001260 return 0;
1261
Ross Zwisler50378352015-10-05 16:33:36 -06001262 /* support for DAX */
1263 if (vma_is_dax(vma)) {
1264 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1265 goto whole;
1266 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1267 goto whole;
1268 return 0;
1269 }
1270
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001271 /* Hugetlb memory check */
1272 if (vma->vm_flags & VM_HUGETLB) {
1273 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1274 goto whole;
1275 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1276 goto whole;
Naoya Horiguchi23d9e482013-04-17 15:58:28 -07001277 return 0;
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001278 }
1279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 /* Do not dump I/O mapped devices or special mappings */
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07001281 if (vma->vm_flags & VM_IO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 return 0;
1283
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001284 /* By default, dump shared memory if mapped from an anonymous file. */
1285 if (vma->vm_flags & VM_SHARED) {
Al Viro496ad9a2013-01-23 17:07:38 -05001286 if (file_inode(vma->vm_file)->i_nlink == 0 ?
Roland McGrath82df3972007-10-16 23:27:02 -07001287 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1288 goto whole;
1289 return 0;
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
Roland McGrath82df3972007-10-16 23:27:02 -07001292 /* Dump segments that have been written to. */
1293 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1294 goto whole;
1295 if (vma->vm_file == NULL)
1296 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
Roland McGrath82df3972007-10-16 23:27:02 -07001298 if (FILTER(MAPPED_PRIVATE))
1299 goto whole;
1300
1301 /*
1302 * If this looks like the beginning of a DSO or executable mapping,
1303 * check for an ELF header. If we find one, dump the first page to
1304 * aid in determining what was mapped here.
1305 */
Roland McGrath92dc07b2009-02-06 17:34:07 -08001306 if (FILTER(ELF_HEADERS) &&
1307 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
Roland McGrath82df3972007-10-16 23:27:02 -07001308 u32 __user *header = (u32 __user *) vma->vm_start;
1309 u32 word;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001310 mm_segment_t fs = get_fs();
Roland McGrath82df3972007-10-16 23:27:02 -07001311 /*
1312 * Doing it this way gets the constant folded by GCC.
1313 */
1314 union {
1315 u32 cmp;
1316 char elfmag[SELFMAG];
1317 } magic;
1318 BUILD_BUG_ON(SELFMAG != sizeof word);
1319 magic.elfmag[EI_MAG0] = ELFMAG0;
1320 magic.elfmag[EI_MAG1] = ELFMAG1;
1321 magic.elfmag[EI_MAG2] = ELFMAG2;
1322 magic.elfmag[EI_MAG3] = ELFMAG3;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001323 /*
1324 * Switch to the user "segment" for get_user(),
1325 * then put back what elf_core_dump() had in place.
1326 */
1327 set_fs(USER_DS);
1328 if (unlikely(get_user(word, header)))
1329 word = 0;
1330 set_fs(fs);
1331 if (word == magic.cmp)
Roland McGrath82df3972007-10-16 23:27:02 -07001332 return PAGE_SIZE;
1333 }
1334
1335#undef FILTER
1336
1337 return 0;
1338
1339whole:
1340 return vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341}
1342
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner name (e.g. "CORE") */
	int type;		/* note type (NT_*) */
	unsigned int datasz;	/* length of data in bytes */
	void *data;		/* note payload */
};
1351
1352static int notesize(struct memelfnote *en)
1353{
1354 int sz;
1355
1356 sz = sizeof(struct elf_note);
1357 sz += roundup(strlen(en->name) + 1, 4);
1358 sz += roundup(en->datasz, 4);
1359
1360 return sz;
1361}
1362
Al Viroecc8c772013-10-05 15:32:35 -04001363static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364{
1365 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 en.n_namesz = strlen(men->name) + 1;
1367 en.n_descsz = men->datasz;
1368 en.n_type = men->type;
1369
Al Viroecc8c772013-10-05 15:32:35 -04001370 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001371 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1372 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
Roland McGrath3aba4812008-01-30 13:31:44 +01001375static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001376 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001378 memset(elf, 0, sizeof(*elf));
1379
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1381 elf->e_ident[EI_CLASS] = ELF_CLASS;
1382 elf->e_ident[EI_DATA] = ELF_DATA;
1383 elf->e_ident[EI_VERSION] = EV_CURRENT;
1384 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001387 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001390 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 elf->e_ehsize = sizeof(struct elfhdr);
1392 elf->e_phentsize = sizeof(struct elf_phdr);
1393 elf->e_phnum = segs;
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 return;
1396}
1397
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001398static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399{
1400 phdr->p_type = PT_NOTE;
1401 phdr->p_offset = offset;
1402 phdr->p_vaddr = 0;
1403 phdr->p_paddr = 0;
1404 phdr->p_filesz = sz;
1405 phdr->p_memsz = 0;
1406 phdr->p_flags = 0;
1407 phdr->p_align = 0;
1408 return;
1409}
1410
1411static void fill_note(struct memelfnote *note, const char *name, int type,
1412 unsigned int sz, void *data)
1413{
1414 note->name = name;
1415 note->type = type;
1416 note->datasz = sz;
1417 note->data = data;
1418 return;
1419}
1420
1421/*
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001422 * fill up all the fields in prstatus from the given task struct, except
1423 * registers which need to be filled up separately.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	/* Only the first word of the pending/blocked signal masks is kept. */
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; hold the read lock for the deref. */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	/* All IDs are reported relative to the task's pid namespace (_vnr). */
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* A non-leader thread reports only its own CPU time. */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}

	/* Cumulative times of reaped children, from the shared signal struct. */
	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
}
1458
/*
 * Fill an NT_PRPSINFO note payload describing process @p.
 *
 * @psinfo: note payload to populate (fully zeroed first)
 * @p:      task whose identity/state is recorded
 * @mm:     mm providing the user-space argv region
 *
 * Returns 0 on success or -EFAULT if the argv area cannot be read.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	/* Clamp argv to the fixed pr_psargs buffer, leaving room for NUL. */
	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* argv strings are NUL-separated; flatten to a space-separated line. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	/* real_parent is RCU-protected; hold the read lock for the deref. */
	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* Map the task state bitmask to an index, then to a state letter. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Credentials are read under RCU; report IDs munged for user_ns. */
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1501
Roland McGrath3aba4812008-01-30 13:31:44 +01001502static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1503{
1504 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1505 int i = 0;
1506 do
1507 i += 2;
1508 while (auxv[i - 2] != AT_NULL);
1509 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1510}
1511
/*
 * Build the NT_SIGINFO note: convert the siginfo that caused the dump
 * into its userspace layout inside @csigdata.
 *
 * copy_siginfo_to_user() writes through a __user pointer, so the address
 * limit is temporarily raised with set_fs(KERNEL_DS) to let it target
 * the kernel buffer, and restored immediately afterwards.
 */
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
1521
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001522#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1523/*
1524 * Format of NT_FILE note:
1525 *
1526 * long count -- how many files are mapped
1527 * long page_size -- units for file_ofs
1528 * array of [COUNT] elements of
1529 * long start
1530 * long end
1531 * long file_ofs
1532 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1533 */
/*
 * Build the NT_FILE note listing every file-backed mapping (see the
 * format description above).  The buffer is sized from an estimate and
 * grown by 25% and retried if a pathname does not fit.
 *
 * Returns 0 on success (note data is vmalloc'ed; freed by
 * free_note_info()), -EINVAL if the note would exceed
 * MAX_FILE_NOTE_SIZE, or -ENOMEM on allocation failure.
 */
static int fill_files_note(struct memelfnote *note)
{
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = current->mm->map_count;
	size = count * 64;

	/* Names start right after the header words and the triples array. */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = vmalloc(size);
	if (!data)
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			/* Buffer too small: grow by 25% and restart. */
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				vfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			/* Other errors: silently skip this mapping. */
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than current->mm->map_count,
	 * we need to move filenames down.
	 */
	n = current->mm->map_count - count;
	if (n != 0) {
		/* Close the gap left by the unused (start,end,ofs) triples. */
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1608
Roland McGrath4206d3a2008-01-30 13:31:45 +01001609#ifdef CORE_DUMP_USE_REGSET
1610#include <linux/regset.h>
1611
/*
 * Per-thread note bookkeeping for the regset-based dumper.  One is
 * allocated per thread with a trailing variable-length array holding
 * one memelfnote slot per note-producing regset (see fill_note_info()).
 */
struct elf_thread_core_info {
	struct elf_thread_core_info *next;	/* singly linked thread list */
	struct task_struct *task;		/* thread this entry describes */
	struct elf_prstatus prstatus;		/* NT_PRSTATUS note payload */
	struct memelfnote notes[0];		/* one slot per regset note */
};
1618
/*
 * All state for the note segment (regset flavour): filled by
 * fill_note_info(), emitted by write_note_info(), released by
 * free_note_info().
 */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* per-thread note chain */
	struct memelfnote psinfo;		/* NT_PRPSINFO */
	struct memelfnote signote;		/* NT_SIGINFO */
	struct memelfnote auxv;			/* NT_AUXV */
	struct memelfnote files;		/* NT_FILE (data may be NULL) */
	user_siginfo_t csigdata;		/* backing store for signote */
	size_t size;				/* total bytes of all notes */
	int thread_notes;			/* note slots per thread entry */
};
1629
Roland McGrathd31472b2008-03-04 14:28:30 -08001630/*
1631 * When a regset has a writeback hook, we call it on each thread before
1632 * dumping user memory. On register window machines, this makes sure the
1633 * user memory backing the register data is up to date before we read it.
1634 */
1635static void do_thread_regset_writeback(struct task_struct *task,
1636 const struct user_regset *regset)
1637{
1638 if (regset->writeback)
1639 regset->writeback(task, regset, 1);
1640}
1641
H. J. Lu0953f65d2012-02-14 13:34:52 -08001642#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001643#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001644#endif
1645
1646#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001647#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001648#endif
1649
/*
 * Collect all regset notes for one thread @t, accumulating their sizes
 * into @total.  Returns 1 on success, 0 on allocation failure.
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset))) {
			int ret;
			size_t size = regset->n * regset->size;
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				/* FP registers additionally set pr_fpvalid. */
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
							1, regset_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
1710
/*
 * Collect all non-memory information for the core dump (regset flavour)
 * and initialize the ELF file header.  Returns 1 on success, 0 on
 * failure; partially-allocated state is released later by
 * free_note_info() on the caller's cleanup path.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is best-effort; its failure does not abort the dump. */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1805
/*
 * Total size in bytes of all notes accumulated by fill_note_info(),
 * used to size the PT_NOTE segment.
 */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
1810
1811/*
1812 * Write all the notes for each thread. When writing the first thread, the
1813 * process-wide notes are interleaved after the first thread-specific note.
1814 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		/* Every thread leads with its NT_PRSTATUS note. */
		if (!writenote(&t->notes[0], cprm))
			return 0;

		/* Process-wide notes go right after the first NT_PRSTATUS. */
		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		/* NT_FILE may be absent (data == NULL) if collection failed. */
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		/* Remaining regset notes; empty slots have NULL data. */
		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}
1848
/*
 * Release everything allocated by fill_note_info() (regset flavour):
 * the per-thread entries with their regset data buffers, the psinfo
 * payload, and the vmalloc'ed NT_FILE data.
 */
static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		/* notes[0] points into t->prstatus and must not be kfreed. */
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	vfree(info->files.data);
}
1864
1865#else
1866
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867/* Here is the structure in which status of each thread is captured. */
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* linked into elf_note_info.thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* thread this entry describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* up to prstatus, fpu, xfpu notes */
	int num_notes;			/* how many of notes[] are valid */
};
1880
1881/*
1882 * In order to add the specific thread information for the elf file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001883 * we need to keep a linked list of every threads pr_status and then create
1884 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 */
/*
 * Fill the per-thread notes (prstatus, and FP/extended-FP registers when
 * available) for thread @t->thread.  Returns the total note size in
 * bytes and sets t->num_notes accordingly.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* The FPU note is emitted only if the task actually used the FPU. */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
1918
/*
 * All state for the note segment (non-regset flavour): filled by
 * fill_note_info(), emitted by write_note_info(), released by
 * free_note_info().
 */
struct elf_note_info {
	struct memelfnote *notes;	/* array of process-wide notes */
	struct memelfnote *notes_files;	/* NT_FILE entry in notes[], or NULL */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* elf_thread_status entries */
	elf_fpregset_t *fpu;		/* NT_PRFPREG payload */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;		/* ELF_CORE_XFPREG_TYPE payload */
#endif
	user_siginfo_t csigdata;	/* NT_SIGINFO payload */
	int thread_status_size;		/* total bytes of per-thread notes */
	int numnote;			/* valid entries in notes[] */
};
1933
/*
 * Zero @info and allocate the fixed note buffers for the non-regset
 * collector.  Returns 1 on success, 0 on allocation failure.  The
 * leading memset leaves unallocated pointers NULL, so free_note_info()
 * can safely clean up after a partial failure.
 */
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
Roland McGrath3aba4812008-01-30 13:31:44 +01001959
/*
 * Collect all non-memory information for the core dump (non-regset
 * flavour) and initialize the ELF file header.  Returns 1 on success,
 * 0 on failure; partial allocations are released by free_note_info()
 * on the caller's cleanup path.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct list_head *t;
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/* One status entry per thread participating in the dump. */
	for (ct = current->mm->core_state->dumper.next;
					ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each(t, &info->thread_list) {
		int sz;

		ets = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* NT_FILE is best-effort; remember its slot for later vfree(). */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
2031
2032static size_t get_note_info_size(struct elf_note_info *info)
2033{
2034 int sz = 0;
2035 int i;
2036
2037 for (i = 0; i < info->numnote; i++)
2038 sz += notesize(info->notes + i);
2039
2040 sz += info->thread_status_size;
2041
2042 return sz;
2043}
2044
2045static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002046 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002047{
2048 int i;
2049 struct list_head *t;
2050
2051 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002052 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002053 return 0;
2054
2055 /* write out the thread status notes section */
2056 list_for_each(t, &info->thread_list) {
2057 struct elf_thread_status *tmp =
2058 list_entry(t, struct elf_thread_status, list);
2059
2060 for (i = 0; i < tmp->num_notes; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002061 if (!writenote(&tmp->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002062 return 0;
2063 }
2064
2065 return 1;
2066}
2067
/*
 * Release everything allocated by fill_note_info() (non-regset flavour).
 * Safe to call after a partial failure: elf_note_info_init() zeroes the
 * struct, and kfree()/vfree() accept NULL.
 */
static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* Free data possibly allocated by fill_files_note(): */
	if (info->notes_files)
		vfree(info->notes_files->data);

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}
2088
Roland McGrath4206d3a2008-01-30 13:31:45 +01002089#endif
2090
Roland McGrathf47aef52007-01-26 00:56:49 -08002091static struct vm_area_struct *first_vma(struct task_struct *tsk,
2092 struct vm_area_struct *gate_vma)
2093{
2094 struct vm_area_struct *ret = tsk->mm->mmap;
2095
2096 if (ret)
2097 return ret;
2098 return gate_vma;
2099}
2100/*
2101 * Helper function for iterating across a vma list. It ensures that the caller
2102 * will visit `gate_vma' prior to terminating the search.
2103 */
2104static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2105 struct vm_area_struct *gate_vma)
2106{
2107 struct vm_area_struct *ret;
2108
2109 ret = this_vma->vm_next;
2110 if (ret)
2111 return ret;
2112 if (this_vma == gate_vma)
2113 return NULL;
2114 return gate_vma;
2115}
2116
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002117static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2118 elf_addr_t e_shoff, int segs)
2119{
2120 elf->e_shoff = e_shoff;
2121 elf->e_shentsize = sizeof(*shdr4extnum);
2122 elf->e_shnum = 1;
2123 elf->e_shstrndx = SHN_UNDEF;
2124
2125 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2126
2127 shdr4extnum->sh_type = SHT_NULL;
2128 shdr4extnum->sh_size = elf->e_shnum;
2129 shdr4extnum->sh_link = elf->e_shstrndx;
2130 shdr4extnum->sh_info = segs;
2131}
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133/*
2134 * Actual dumper
2135 *
2136 * This is a two-pass process; first we find the offsets of the bits,
2137 * and then they are actually written out. If we run out of core limit
2138 * we just truncate.
2139 */
Masami Hiramatsuf6151df2009-12-17 15:27:16 -08002140static int elf_core_dump(struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 int has_dumped = 0;
2143 mm_segment_t fs;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002144 int segs, i;
2145 size_t vma_data_size = 0;
Roland McGrathf47aef52007-01-26 00:56:49 -08002146 struct vm_area_struct *vma, *gate_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 struct elfhdr *elf = NULL;
Al Virocdc3d562013-10-05 22:24:29 -04002148 loff_t offset = 0, dataoff;
Dan Aloni72023652013-09-30 13:45:02 -07002149 struct elf_note_info info = { };
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002150 struct elf_phdr *phdr4note = NULL;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002151 struct elf_shdr *shdr4extnum = NULL;
2152 Elf_Half e_phnum;
2153 elf_addr_t e_shoff;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002154 elf_addr_t *vma_filesz = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 /*
2157 * We no longer stop all VM operations.
2158 *
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002159 * This is because those proceses that could possibly change map_count
2160 * or the mmap / vma pages are now blocked in do_exit on current
2161 * finishing this core dump.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 *
2163 * Only ptrace can touch these memory addresses, but it doesn't change
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002164 * the map_count or the pages allocated. So no possibility of crashing
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 * exists while dumping the mm->vm_next areas to the core file.
2166 */
2167
2168 /* alloc memory for large data structures: too large to be on stack */
2169 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2170 if (!elf)
WANG Cong5f719552008-05-06 12:45:35 +08002171 goto out;
KAMEZAWA Hiroyuki341c87b2009-06-30 11:41:23 -07002172 /*
2173 * The number of segs are recored into ELF header as 16bit value.
2174 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
2175 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 segs = current->mm->map_count;
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002177 segs += elf_core_extra_phdrs();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
Stephen Wilson31db58b2011-03-13 15:49:15 -04002179 gate_vma = get_gate_vma(current->mm);
Roland McGrathf47aef52007-01-26 00:56:49 -08002180 if (gate_vma != NULL)
2181 segs++;
2182
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002183 /* for notes section */
2184 segs++;
2185
2186 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2187 * this, kernel supports extended numbering. Have a look at
2188 * include/linux/elf.h for further information. */
2189 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2190
Roland McGrath3aba4812008-01-30 13:31:44 +01002191 /*
2192 * Collect all the non-memory information about the process for the
2193 * notes. This also sets up the file header.
2194 */
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07002195 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
Roland McGrath3aba4812008-01-30 13:31:44 +01002196 goto cleanup;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
2198 has_dumped = 1;
Oleg Nesterov079148b2013-04-30 15:28:16 -07002199
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 fs = get_fs();
2201 set_fs(KERNEL_DS);
2202
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 offset += sizeof(*elf); /* Elf header */
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002204 offset += segs * sizeof(struct elf_phdr); /* Program headers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 /* Write notes phdr entry */
2207 {
Roland McGrath3aba4812008-01-30 13:31:44 +01002208 size_t sz = get_note_info_size(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Michael Ellermane5501492007-09-19 14:38:12 +10002210 sz += elf_coredump_extra_notes_size();
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002211
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002212 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2213 if (!phdr4note)
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002214 goto end_coredump;
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002215
2216 fill_elf_note_phdr(phdr4note, sz, offset);
2217 offset += sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 }
2219
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2221
Jason Baron30f74aa2016-12-12 16:46:40 -08002222 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
2223 goto end_coredump;
2224 vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002225 if (!vma_filesz)
2226 goto end_coredump;
2227
2228 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2229 vma = next_vma(vma, gate_vma)) {
2230 unsigned long dump_size;
2231
2232 dump_size = vma_dump_size(vma, cprm->mm_flags);
2233 vma_filesz[i++] = dump_size;
2234 vma_data_size += dump_size;
2235 }
2236
2237 offset += vma_data_size;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002238 offset += elf_core_extra_data_size();
2239 e_shoff = offset;
2240
2241 if (e_phnum == PN_XNUM) {
2242 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2243 if (!shdr4extnum)
2244 goto end_coredump;
2245 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2246 }
2247
2248 offset = dataoff;
2249
Al Viroecc8c772013-10-05 15:32:35 -04002250 if (!dump_emit(cprm, elf, sizeof(*elf)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002251 goto end_coredump;
2252
Al Viroecc8c772013-10-05 15:32:35 -04002253 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002254 goto end_coredump;
2255
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 /* Write program headers for segments dump */
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002257 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002258 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 struct elf_phdr phdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
2261 phdr.p_type = PT_LOAD;
2262 phdr.p_offset = offset;
2263 phdr.p_vaddr = vma->vm_start;
2264 phdr.p_paddr = 0;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002265 phdr.p_filesz = vma_filesz[i++];
Roland McGrath82df3972007-10-16 23:27:02 -07002266 phdr.p_memsz = vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 offset += phdr.p_filesz;
2268 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002269 if (vma->vm_flags & VM_WRITE)
2270 phdr.p_flags |= PF_W;
2271 if (vma->vm_flags & VM_EXEC)
2272 phdr.p_flags |= PF_X;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 phdr.p_align = ELF_EXEC_PAGESIZE;
2274
Al Viroecc8c772013-10-05 15:32:35 -04002275 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002276 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 }
2278
Al Viro506f21c2013-10-05 17:22:57 -04002279 if (!elf_core_write_extra_phdrs(cprm, offset))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002280 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
2282 /* write out the notes section */
Al Viroecc8c772013-10-05 15:32:35 -04002283 if (!write_note_info(&info, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002284 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
Al Virocdc3d562013-10-05 22:24:29 -04002286 if (elf_coredump_extra_notes_write(cprm))
Michael Ellermane5501492007-09-19 14:38:12 +10002287 goto end_coredump;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002288
Andi Kleend025c9d2006-09-30 23:29:28 -07002289 /* Align to page */
Mateusz Guzik1607f092016-06-05 23:14:14 +02002290 if (!dump_skip(cprm, dataoff - cprm->pos))
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002291 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002293 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002294 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 unsigned long addr;
Roland McGrath82df3972007-10-16 23:27:02 -07002296 unsigned long end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002298 end = vma->vm_start + vma_filesz[i++];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
Roland McGrath82df3972007-10-16 23:27:02 -07002300 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002301 struct page *page;
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002302 int stop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002304 page = get_dump_page(addr);
2305 if (page) {
2306 void *kaddr = kmap(page);
Al Viro13046ec2013-10-05 18:08:47 -04002307 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002308 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002309 put_page(page);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002310 } else
Al Viro9b56d542013-10-08 09:26:08 -04002311 stop = !dump_skip(cprm, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002312 if (stop)
2313 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 }
2315 }
Dave Kleikamp4d22c752017-01-11 13:25:00 -06002316 dump_truncate(cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
Al Viroaa3e7ea2013-10-05 17:50:15 -04002318 if (!elf_core_write_extra_data(cprm))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002319 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002321 if (e_phnum == PN_XNUM) {
Al Viro13046ec2013-10-05 18:08:47 -04002322 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002323 goto end_coredump;
2324 }
2325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326end_coredump:
2327 set_fs(fs);
2328
2329cleanup:
Roland McGrath3aba4812008-01-30 13:31:44 +01002330 free_note_info(&info);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002331 kfree(shdr4extnum);
Jason Baron30f74aa2016-12-12 16:46:40 -08002332 vfree(vma_filesz);
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002333 kfree(phdr4note);
WANG Cong5f719552008-05-06 12:45:35 +08002334 kfree(elf);
2335out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 return has_dumped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337}
2338
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002339#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
2341static int __init init_elf_binfmt(void)
2342{
Al Viro8fc3dc52012-03-17 03:05:16 -04002343 register_binfmt(&elf_format);
2344 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345}
2346
/* Module teardown: detach the ELF handler from the binfmt core. */
static void __exit exit_elf_binfmt(void)
{
	/*
	 * Remove the ELF loader.  (Historic comment mentioned COFF as
	 * well, but only elf_format is registered/unregistered here.)
	 */
	unregister_binfmt(&elf_format);
}
2352
/*
 * Register early (core_initcall rather than module_init) so the ELF
 * loader is available as soon as the first userspace binary is exec'd.
 */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");