#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"

#include <trace/events/sched.h>

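/*
 * These globals back the /proc/sys/kernel tunables (core_uses_pid,
 * core_pattern and core_pipe_limit), wired up in kernel/sysctl.c.
 */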
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;

struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */

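/*
 * Each call grows the buffer by another CORENAME_MAX_SIZE: call_count
 * is global and only ever increments, so the buffer is never shrunk.
 */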
static int expand_corename(struct core_name *cn)
{
	int size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	cn->size = size;
	cn->corename = corename;
	return 0;
}

static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	/*
	 * We may retry after expand_corename(), and vsnprintf() consumes
	 * the va_list, so always work on a copy.
	 */
	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn))
		goto again;

	return -ENOMEM;
}

static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

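/*
 * Like cn_printf(), but any '/' produced by the expansion is replaced
 * with '!' so that an expanded value such as a hostname or comm cannot
 * smuggle extra path components into the core file name.
 */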
static int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/*
 * format_corename() expands core_pattern into cn->corename, growing
 * the buffer as needed. Returns ispipe (non-zero when the pattern
 * names a pipe helper) on success, or a negative errno on failure.
 */
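/*
 * Illustrative example (not from this file): with
 * core_pattern = "/tmp/core.%e.%p", a crash of "bash" with tgid 1234
 * expands to "/tmp/core.bash.1234". A leading '|', as in
 * "|/usr/local/bin/core-helper %p" (a hypothetical helper), makes
 * ispipe non-zero and the rest of the pattern is treated as the
 * command line of a usermode helper fed the dump on stdin.
 */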
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
					      from_kuid(&init_user_ns,
							cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
					      from_kgid(&init_user_ns,
							cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d", cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}

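/*
 * Queue SIGKILL for every thread in @start's group that still has a
 * ->mm (except current) and return how many threads were kicked.
 */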
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		tsk->signal->group_exit_task = tsk;
		/* ignore all signals except SIGKILL, see prepare_signal() */
		tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;	/* |=, don't clobber the other PF_ flags */
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					p->signal->flags = SIGNAL_GROUP_EXIT;
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

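/*
 * Stop all other users of the mm and wait until they have parked in
 * exit_mm(). Returns the number of waiting threads, or a negative
 * errno when a core dump or group exit is already in progress.
 */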
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return signal_pending(current);
}

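/*
 * Drop our writer reference (so the helper sees EOF on stdin once the
 * pipe drains) while posing as an extra reader, then wait until the
 * helper closes its end and pipe->readers falls back to 1.
 */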
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * Helper function to customize the process used to collect the core in
 * userspace. Specifically it sets up a pipe and installs it as fd 0
 * (stdin) for the process. Returns 0 on success, or a negative errno
 * on failure. Note that it also sets the core limit to 1; this is a
 * special value that we use to trap recursive core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

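/*
 * A minimal sketch of the userspace side (an assumption for
 * illustration, not part of the kernel): a helper registered as
 * core_pattern = "|/usr/local/bin/corecatch %p" simply reads the dump
 * from fd 0, which umh_pipe_setup() wired to the pipe above:
 *
 *	int main(int argc, char **argv)
 *	{
 *		char buf[4096];		// argv[1] is the crashing pid (%p)
 *		ssize_t n;
 *
 *		while ((n = read(0, buf, sizeof(buf))) > 0)
 *			;		// write buf somewhere persistent
 *		return 0;
 *	}
 */
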
void do_coredump(siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	struct files_struct *displaced;
	bool need_nonrelative = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		flag = O_EXCL;			/* Stop rewrite attacks */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_nonrelative = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_nonrelative && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		cprm.file = filp_open(cn.corename,
				 O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually I see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others into
		 * coredumping into their pre-created files.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return !dump_interrupted() &&
		access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);
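
/*
 * Illustrative sketch (an assumption, not a binfmt that exists in this
 * tree): a minimal ->core_dump() would emit a header with dump_write()
 * and then pad out to a page boundary with dump_seek():
 *
 *	static int toy_core_dump(struct coredump_params *cprm)
 *	{
 *		struct toy_header hdr = { .magic = TOY_MAGIC };	// hypothetical
 *
 *		if (!dump_write(cprm->file, &hdr, sizeof(hdr)))
 *			return 0;	// interrupted or short write
 *		return dump_seek(cprm->file, PAGE_SIZE - sizeof(hdr));
 *	}
 */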