blob: 235827032a717dc47186abe7a0d9ac55fcbe6fa4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
Christian Braunercf9f8292018-11-19 00:51:56 +010017#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/fs.h>
Christian Braunercf9f8292018-11-19 00:51:56 +010019#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/tty.h>
21#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070022#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/security.h>
24#include <linux/syscalls.h>
25#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070026#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070027#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090028#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070029#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080030#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080031#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080032#include <linux/pid_namespace.h>
33#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080034#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053035#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050036#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000037#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070038#include <linux/compiler.h>
39
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050040#define CREATE_TRACE_POINTS
41#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080042
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <asm/param.h>
44#include <asm/uaccess.h>
45#include <asm/unistd.h>
46#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010047#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040048#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50/*
51 * SLAB caches for signal bits.
52 */
53
Christoph Lametere18b8902006-12-06 20:33:20 -080054static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090056int print_fatal_signals __read_mostly;
57
Roland McGrath35de2542008-07-25 19:45:51 -070058static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070059{
Roland McGrath35de2542008-07-25 19:45:51 -070060 return t->sighand->action[sig - 1].sa.sa_handler;
61}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070062
Roland McGrath35de2542008-07-25 19:45:51 -070063static int sig_handler_ignored(void __user *handler, int sig)
64{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070065 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070066 return handler == SIG_IGN ||
67 (handler == SIG_DFL && sig_kernel_ignore(sig));
68}
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
/*
 * Would @sig be ignored if delivered to @t right now, based purely on
 * the installed handler and the task's special-case status?
 * @force: signal was generated by the kernel / cannot be suppressed by
 *         normal means (e.g. forced faults) — weakens the unkillable
 *         exemptions below.
 *
 * Note the mix of 'return true' and 'return 1': the function is
 * declared int but is used as a boolean by sig_ignored().
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	/*
	 * SIGNAL_UNKILLABLE tasks (e.g. container inits) ignore signals
	 * left at SIG_DFL, unless the signal is forced AND is one of the
	 * kernel-only signals (SIGKILL/SIGSTOP).
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
91
/*
 * Top-level "would this signal be discarded?" test used on the send
 * path.  Returns 0 (deliver/queue it) or non-zero (drop it).
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
112
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 *
 * Returns non-zero iff any bit is set in @signal that is not also set
 * in @blocked.  The switch on the compile-time constant _NSIG_WORDS
 * collapses to a single case; 4/2/1 are hand-unrolled versions of the
 * generic loop in 'default'.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
142
143#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
144
/*
 * Recompute whether @t has work pending on the signal path: job control
 * state, private pending signals, or shared (process-wide) pending
 * signals not blocked by @t.  Sets TIF_SIGPENDING and returns 1 if so.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
160
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
170
/*
 * Recompute TIF_SIGPENDING for current.  Unlike recalc_sigpending_tsk()
 * this may also *clear* the flag — safe only on current, and not while
 * the freezer wants us to stop (the fake signal must stay pending).
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
177
178/* Given the mask, find the first available signal that should be serviced. */
179
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800180#define SYNCHRONOUS_MASK \
181 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
Will Drewrya0727e82012-04-12 16:48:00 -0500182 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800183
/*
 * Given the mask, find the first available signal that should be serviced.
 *
 * Returns the lowest-numbered deliverable signal (1-based), with the
 * twist that synchronous fault signals (SYNCHRONOUS_MASK) in the first
 * word win over everything else.  Returns 0 if nothing is deliverable.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		/* ffz(~x) == index of lowest set bit in x */
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Remaining words; switch is resolved at compile time. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
229
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900230static inline void print_dropped_signal(int sig)
231{
232 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
233
234 if (!print_fatal_signals)
235 return;
236
237 if (!__ratelimit(&ratelimit_state))
238 return;
239
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700240 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900241 current->comm, current->pid, sig);
242}
243
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces any previously latched one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
276
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
297
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* Clearing STOP_PENDING invalidates the associated STOP state too. */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
325
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
367
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * The allocation is charged against @t's user's RLIMIT_SIGPENDING unless
 * @override_rlimit is set (kernel-internal senders).  Returns NULL on
 * allocation failure or when the rlimit would be exceeded; in the latter
 * case a rate-limited diagnostic is printed.
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	/*
	 * The pending count was bumped optimistically above; it is undone
	 * below if we fail to allocate.
	 */
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;	/* q holds the uid reference until freed */
	}

	return q;
}
407
/*
 * Release a sigqueue entry allocated by __sigqueue_alloc(): undo the
 * per-user pending accounting, drop the uid reference and free the slab
 * object.  Preallocated (timer-owned) entries are never freed here.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
416
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800417void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418{
419 struct sigqueue *q;
420
421 sigemptyset(&queue->signal);
422 while (!list_empty(&queue->list)) {
423 q = list_entry(queue->list.next, struct sigqueue , list);
424 list_del_init(&q->list);
425 __sigqueue_free(q);
426 }
427}
428
/*
 * Flush all pending signals for this kthread.
 *
 * Drops both the task-private and the process-shared pending queues and
 * clears TIF_SIGPENDING, under @t's siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
442
/*
 * Remove all SI_TIMER (itimer-generated) entries from @pending while
 * retaining everything else.  A signal number stays set in the pending
 * mask if at least one non-timer entry for it remains (tracked via
 * @retain); otherwise it is dropped from @signal.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* Rebuild the pending mask: non-timer bits win back their slot. */
	sigorsets(&pending->signal, &signal, &retain);
}
465
466void flush_itimer_signals(void)
467{
468 struct task_struct *tsk = current;
469 unsigned long flags;
470
471 spin_lock_irqsave(&tsk->sighand->siglock, flags);
472 __flush_itimer_signals(&tsk->pending);
473 __flush_itimer_signals(&tsk->signal->shared_pending);
474 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
475}
476
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700477void ignore_signals(struct task_struct *t)
478{
479 int i;
480
481 for (i = 0; i < _NSIG; ++i)
482 t->sighand->action[i].sa.sa_handler = SIG_IGN;
483
484 flush_signals(t);
485}
486
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 * Flush all handlers for a task.
489 */
490
491void
492flush_signal_handlers(struct task_struct *t, int force_default)
493{
494 int i;
495 struct k_sigaction *ka = &t->sighand->action[0];
496 for (i = _NSIG ; i != 0 ; i--) {
497 if (force_default || ka->sa.sa_handler != SIG_IGN)
498 ka->sa.sa_handler = SIG_DFL;
499 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700500#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700501 ka->sa.sa_restorer = NULL;
502#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 sigemptyset(&ka->sa.sa_mask);
504 ka++;
505 }
506}
507
/*
 * Is @sig effectively unhandled for @tsk?  True for the global init (it
 * never "handles" anything here), false when a user handler is installed,
 * and for SIG_IGN/SIG_DFL the ptracer — if any — gets the final say.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
518
/*
 * Fill @info for a dequeue of @sig from @list.  The pending-mask bit is
 * cleared only if no second entry for the same signal remains queued
 * (legacy signals are not queued multiple times).  *@resched_timer is
 * set when the dequeued entry is a preallocated POSIX-timer entry whose
 * timer must be re-armed by the caller.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;	/* duplicate: keep bit set */
			first = q;
		}
	}

	/* Sole (or no) entry: the signal is no longer pending. */
	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
562
/*
 * Dequeue the next deliverable signal from @pending given @mask,
 * filling @info via collect_signal().  Returns the signal number or 0
 * if nothing is deliverable.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
572
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 *
 * Private signals are tried first, then process-shared ones; signalfd
 * must not steal task-private signals (see comment below).
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
648
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 *
 * @state: extra task states to wake from (e.g. TASK_WAKEKILL for fatal
 * signals), OR-ed with TASK_INTERRUPTIBLE below.
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);	/* running elsewhere: force a signal recheck */
}
673
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* Fast path: nothing in @mask is actually pending. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	/* Clear the masked bits, then drop the matching queue entries. */
	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
/*
 * True for the special kernel-internal siginfo sentinels
 * (SEND_SIG_NOINFO/SEND_SIG_PRIV/SEND_SIG_FORCED), which are small
 * constant "pointers" ordered below SEND_SIG_FORCED.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
703
/*
 * Did this siginfo originate from userspace?  Either the no-info
 * sentinel (kill(2)-style) or a real siginfo whose si_code says
 * user-sent (SI_FROMUSER).
 */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
709
/*
 * Dequeue a pending synchronous (fault-generated) signal for current,
 * if one exists, ahead of ordinary signals.  Fills @info and returns
 * the signal number, or 0 when no synchronous signal is queued.
 * Caller holds current's siglock.
 */
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;	/* duplicate queued: keep bit set */
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
751
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700753 * called with RCU read lock from check_kill_permission()
754 */
755static int kill_ok_by_cred(struct task_struct *t)
756{
757 const struct cred *cred = current_cred();
758 const struct cred *tcred = __task_cred(t);
759
Eric W. Biederman5af66202012-03-03 20:21:47 -0800760 if (uid_eq(cred->euid, tcred->suid) ||
761 uid_eq(cred->euid, tcred->uid) ||
762 uid_eq(cred->uid, tcred->suid) ||
763 uid_eq(cred->uid, tcred->uid))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700764 return 1;
765
Eric W. Biedermanc4a4d602011-11-16 23:15:31 -0800766 if (ns_capable(tcred->user_ns, CAP_KILL))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700767 return 1;
768
769 return 0;
770}
771
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 if @current may send @sig to @t, or a -errno otherwise
 * (-EINVAL for a bad signal number, -EPERM for insufficient
 * credentials, or whatever audit/security hooks report).
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-originated signals bypass the permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 * SIGCONT within the same session is always allowed.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
810
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
836
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* Coredump in progress (GROUP_EXIT clear): only SIGKILL passes. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 * Seized tracees get a ptrace trap instead of a plain wakeup
		 * so the tracer is notified.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
911
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700912/*
913 * Test if P wants to take SIG. After we've checked all threads with this,
914 * it's equivalent to finding no threads not blocking SIG. Any threads not
915 * blocking SIG were ruled out because they are not running and already
916 * have pending signals. Such threads will dequeue from the shared queue
917 * as soon as they're available, so putting the signal on the shared queue
918 * will be equivalent to sending it to one such thread.
919 */
920static inline int wants_signal(int sig, struct task_struct *p)
921{
922 if (sigismember(&p->blocked, sig))
923 return 0;
924 if (p->flags & PF_EXITING)
925 return 0;
926 if (sig == SIGKILL)
927 return 1;
928 if (task_is_stopped_or_traced(p))
929 return 0;
930 return task_curr(p) || !signal_pending(p);
931}
932
/*
 * Pick a thread to handle @sig, which has just been made pending on @p
 * (on the shared queue if @group, else on p's private queue), and wake
 * it.  If the signal is fatal and not one that dumps core, short-circuit
 * into a full group exit by making SIGKILL pending on every thread.
 * Called from __send_signal() with the siglock held.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so delivery is spread round-robin.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1008
/*
 * Legacy (non-realtime) signals do not queue: a second instance of a
 * signal below SIGRTMIN that is already pending is dropped by the caller.
 */
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
1013
/*
 * Translate info->si_uid from the sender's user namespace into the
 * target task's user namespace when a user-originated signal crosses
 * a namespace boundary; no-op without CONFIG_USER_NS.
 */
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	/* Same user namespace: si_uid needs no translation. */
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	/* Don't touch kernel-generated siginfo. */
	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1034
/*
 * Core signal-generation path.  Queues @sig (with @info) on either the
 * shared (@group) or per-thread pending list of @t and wakes a suitable
 * thread via complete_signal().  Returns 0 on success, or -EAGAIN when
 * an rt-signal queue overflow must be reported to the sender.
 * Caller must hold t->sighand->siglock (asserted below).
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;	/* result feeds the signal_generate tracepoint */

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Synthesize or copy the siginfo depending on the marker. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* Sender's pid is meaningless in a descendant pid ns. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1138
/*
 * Wrapper around __send_signal() that computes from_ancestor_ns: true
 * when a user-space sender has no pid in @t's active pid namespace
 * (i.e. the sender lives in an ancestor namespace), in which case
 * __send_signal() zeroes si_pid in the queued siginfo.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1151
/*
 * Log diagnostics for a task taking an unexpected fatal signal:
 * the signal number, the registers and — on i386 (not UML) — a hex
 * dump of up to 16 instruction bytes at the faulting ip.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop at the first byte we cannot read from userspace. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* Keep the register dump on one CPU's output. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1175
/*
 * Parse the "print-fatal-signals=" boot parameter into the
 * print_fatal_signals sysctl.  Always returns 1 (parameter consumed).
 */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
/* Queue @sig to the whole thread group of @p; siglock held by caller. */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
1190
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191static int
1192specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1193{
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001194 return send_signal(sig, info, t, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195}
1196
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001197int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1198 bool group)
1199{
1200 unsigned long flags;
1201 int ret = -ESRCH;
1202
1203 if (lock_task_sighand(p, &flags)) {
1204 ret = send_signal(sig, info, p, group);
1205 unlock_task_sighand(p, &flags);
1206 }
1207
1208 return ret;
1209}
1210
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211/*
1212 * Force a signal that the process can't ignore: if necessary
1213 * we unblock the signal and change any SIG_IGN to SIG_DFL.
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001214 *
1215 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1216 * since we do not want to have a signal handler that was blocked
1217 * be invoked when user space had explicitly blocked it.
1218 *
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001219 * We don't want to have recursive SIGSEGV's etc, for example,
1220 * that is why we also clear SIGNAL_UNKILLABLE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222int
1223force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1224{
1225 unsigned long int flags;
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001226 int ret, blocked, ignored;
1227 struct k_sigaction *action;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
1229 spin_lock_irqsave(&t->sighand->siglock, flags);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001230 action = &t->sighand->action[sig-1];
1231 ignored = action->sa.sa_handler == SIG_IGN;
1232 blocked = sigismember(&t->blocked, sig);
1233 if (blocked || ignored) {
1234 action->sa.sa_handler = SIG_DFL;
1235 if (blocked) {
1236 sigdelset(&t->blocked, sig);
Roland McGrath7bb44ad2007-05-23 13:57:44 -07001237 recalc_sigpending_and_wake(t);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 }
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001240 if (action->sa.sa_handler == SIG_DFL)
1241 t->signal->flags &= ~SIGNAL_UNKILLABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 ret = specific_send_sig_info(sig, info, t);
1243 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1244
1245 return ret;
1246}
1247
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248/*
1249 * Nuke all other threads in the group.
1250 */
Oleg Nesterov09faef12010-05-26 14:43:11 -07001251int zap_other_threads(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
Oleg Nesterov09faef12010-05-26 14:43:11 -07001253 struct task_struct *t = p;
1254 int count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 p->signal->group_stop_count = 0;
1257
Oleg Nesterov09faef12010-05-26 14:43:11 -07001258 while_each_thread(p, t) {
Tejun Heo6dfca322011-06-02 11:14:00 +02001259 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
Oleg Nesterov09faef12010-05-26 14:43:11 -07001260 count++;
1261
1262 /* Don't bother with already dead threads */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 if (t->exit_state)
1264 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 sigaddset(&t->pending.signal, SIGKILL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 signal_wake_up(t, 1);
1267 }
Oleg Nesterov09faef12010-05-26 14:43:11 -07001268
1269 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270}
1271
/*
 * Take ->siglock of @tsk's sighand_struct, safely against concurrent
 * release of the sighand.  Returns the locked sighand with interrupts
 * disabled (saved state in *flags), or NULL when the task's sighand is
 * already gone (__exit_signal()); pair with unlock_task_sighand().
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		/* Lost the race: unlock and retry with the new sighand. */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1313
David Howellsc69e8d92008-11-14 10:39:19 +11001314/*
1315 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001316 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1318{
David Howells694f6902010-08-04 16:59:14 +01001319 int ret;
1320
1321 rcu_read_lock();
1322 ret = check_kill_permission(sig, info, p);
1323 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001325 if (!ret && sig)
1326 ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328 return ret;
1329}
1330
1331/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001332 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001334 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001336int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337{
1338 struct task_struct *p = NULL;
1339 int retval, success;
1340
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 success = 0;
1342 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001343 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 int err = group_send_sig_info(sig, info, p);
1345 success |= !err;
1346 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001347 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 return success ? 0 : retval;
1349}
1350
/*
 * Send @sig to the process identified by @pid.  Retries the lookup if
 * the thread-group leader was unhashed concurrently (group exec), so a
 * racing de_thread() cannot make the send spuriously fail with -ESRCH.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1372
Randy Dunlap5aba0852011-04-04 14:59:31 -07001373int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001374{
1375 int error;
1376 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001377 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001378 rcu_read_unlock();
1379 return error;
1380}
1381
Serge Hallynd178bc32011-09-26 10:45:18 -05001382static int kill_as_cred_perm(const struct cred *cred,
1383 struct task_struct *target)
1384{
1385 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001386 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1387 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001388 return 0;
1389 return 1;
1390}
1391
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
/*
 * Permission is checked against the supplied @cred and @secid instead of
 * the caller's own credentials.  @sig == 0 acts as a permission probe:
 * all checks run but no signal is queued.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* User-originated signals must pass the cred-based check. */
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 *
 * Dispatch on @pid:
 *   pid > 0   -> signal that single process (by virtual pid);
 *   pid == 0  -> signal the caller's process group;
 *   pid < -1  -> signal the process group -pid;
 *   pid == -1 -> broadcast to every process except pid-1 (init in this
 *               namespace) and the caller's own thread group.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	/* tasklist_lock stabilizes the process list for group/broadcast. */
	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				/*
				 * -EPERM for some targets does not fail the
				 * broadcast; any other error is reported.
				 */
				if (err != -EPERM)
					retval = err;
			}
		}
		/* No eligible target at all -> -ESRCH, matching kill(2). */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1475
1476/*
1477 * These are for backward compatibility with the rest of the kernel source.
1478 */
1479
Randy Dunlap5aba0852011-04-04 14:59:31 -07001480int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 /*
1483 * Make sure legacy kernel users don't send in bad values
1484 * (normal paths check this in check_kill_permission).
1485 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001486 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 return -EINVAL;
1488
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001489 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490}
1491
/*
 * Map a boolean "priv" flag to the corresponding special siginfo
 * sentinel: kernel-internal sender vs. no-info legacy sender.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1494
/*
 * Send @sig to task @p.  @priv selects whether the signal is tagged as
 * coming from the kernel (SEND_SIG_PRIV) or carries no siginfo.
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
1500
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501void
1502force_sig(int sig, struct task_struct *p)
1503{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001504 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505}
1506
1507/*
1508 * When things go south during signal handling, we
1509 * will force a SIGSEGV. And if the signal that caused
1510 * the problem was already a SIGSEGV, we'll want to
1511 * make sure we don't even try to deliver the signal..
1512 */
1513int
1514force_sigsegv(int sig, struct task_struct *p)
1515{
1516 if (sig == SIGSEGV) {
1517 unsigned long flags;
1518 spin_lock_irqsave(&p->sighand->siglock, flags);
1519 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1520 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1521 }
1522 force_sig(SIGSEGV, p);
1523 return 0;
1524}
1525
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001526int kill_pgrp(struct pid *pid, int sig, int priv)
1527{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001528 int ret;
1529
1530 read_lock(&tasklist_lock);
1531 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1532 read_unlock(&tasklist_lock);
1533
1534 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001535}
1536EXPORT_SYMBOL(kill_pgrp);
1537
/* Send @sig to the single task identified by @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	struct siginfo *si = __si_special(priv);

	return kill_pid_info(sig, si, pid);
}
EXPORT_SYMBOL(kill_pid);
1543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544/*
1545 * These functions support sending signals using preallocated sigqueue
1546 * structures. This is needed "because realtime applications cannot
1547 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001548 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 * we allocate the sigqueue structure from the timer_create. If this
1550 * allocation fails we are able to report the failure to the application
1551 * with an EAGAIN error.
1552 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553struct sigqueue *sigqueue_alloc(void)
1554{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001555 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001557 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001559
1560 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561}
1562
/*
 * Release a preallocated sigqueue entry obtained from sigqueue_alloc().
 *
 * If the entry is currently queued on some task, we only clear the
 * PREALLOC flag and let the eventual dequeue free it; otherwise we free
 * it here.  Must only be called on SIGQUEUE_PREALLOC entries.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1587
/*
 * Queue the preallocated sigqueue @q on @t (per-thread) or on @t's group
 * (@group != 0).  Used by POSIX timers so delivery cannot fail on OOM.
 *
 * Returns 0 when queued, 1 when the signal is ignored, and -1 when the
 * target's sighand is gone (task is exiting).  If the entry is already
 * queued (SI_TIMER), only the overrun count is bumped.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	/* Freshly queued: reset overrun accounting for this round. */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	/* Tracepoint fires for every outcome, while still holding sighand. */
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1631
Joel Fernandes (Google)af1070f2019-04-30 12:21:53 -04001632static void do_notify_pidfd(struct task_struct *task)
1633{
1634 struct pid *pid;
1635
1636 pid = task_pid(task);
1637 wake_up_all(&pid->wait_pidfd);
1638}
1639
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 *
 * NOTE(review): per the inline comment below, this runs under
 * tasklist_lock so tsk->parent cannot change underneath us.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	/* Only a thread-group leader with no live siblings may report exit. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* Report cumulative times: this thread plus already-dead siblings. */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/* Decode exit_code: 0x80 = core dumped, low 7 bits = fatal signal. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1739
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	/* Pick the recipient: the ptracer, or the real parent of the leader. */
	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	/* si_status depends on which state change we are reporting. */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* SIGCHLD is suppressed if the parent ignores it or set SA_NOCLDSTOP. */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1809
/*
 * Return non-zero if it is safe for the current task to stop for its
 * tracer; zero if it is untraced or stopping would deadlock with a
 * coredump the tracer itself is part of.
 */
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
1833
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001835 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001836 * Called with the siglock held.
1837 */
1838static int sigkill_pending(struct task_struct *tsk)
1839{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001840 return sigismember(&tsk->pending.signal, SIGKILL) ||
1841 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001842}
1843
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 *
 * @exit_code: value reported to the tracer via current->exit_code
 * @why:       CLD_* reason forwarded to do_notify_parent_cldstop()
 * @clear_code: if the tracer vanished, reset current->exit_code to 0
 * @info:      siginfo published through current->last_siginfo while stopped
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		/* Sleep in TASK_TRACED until the tracer resumes us. */
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
1976
Tejun Heo3544d722011-06-14 11:20:15 +02001977static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978{
1979 siginfo_t info;
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 memset(&info, 0, sizeof info);
Tejun Heo3544d722011-06-14 11:20:15 +02001982 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001984 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001985 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02001988 ptrace_stop(exit_code, why, 1, &info);
1989}
1990
/*
 * Report a SIGTRAP-class ptrace event (@exit_code encodes the event in
 * its high bits; the low bits must be SIGTRAP).  Runs pending task_work
 * first, then traps under siglock via ptrace_do_notify().
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2001
Tejun Heo73ddff22011-06-14 11:20:14 +02002002/**
2003 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2004 * @signr: signr causing group stop if initiating
2005 *
2006 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2007 * and participate in it. If already set, participate in the existing
2008 * group stop. If participated in a group stop (and thus slept), %true is
2009 * returned with siglock released.
2010 *
2011 * If ptraced, this function doesn't handle stop itself. Instead,
2012 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2013 * untouched. The caller must ensure that INTERRUPT trap handling takes
2014 * places afterwards.
2015 *
2016 * CONTEXT:
2017 * Must be called with @current->sighand->siglock held, which is released
2018 * on %true return.
2019 *
2020 * RETURNS:
2021 * %false if group stop is already cancelled or ptrace trap is scheduled.
2022 * %true if participated in group stop.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 */
Tejun Heo73ddff22011-06-14 11:20:14 +02002024static bool do_signal_stop(int signr)
2025 __releases(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
2027 struct signal_struct *sig = current->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
Tejun Heoa8f072c2011-06-02 11:13:59 +02002029 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
Palmer Dabbeltb76808e2015-04-30 21:19:57 -07002030 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08002031 struct task_struct *t;
2032
Tejun Heoa8f072c2011-06-02 11:13:59 +02002033 /* signr will be recorded in task->jobctl for retries */
2034 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
Tejun Heod79fdd62011-03-23 10:37:00 +01002035
Tejun Heoa8f072c2011-06-02 11:13:59 +02002036 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
Oleg Nesterov573cf9a2008-04-30 00:52:36 -07002037 unlikely(signal_group_exit(sig)))
Tejun Heo73ddff22011-06-14 11:20:14 +02002038 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 /*
Tejun Heo408a37d2011-03-23 10:37:01 +01002040 * There is no group stop already in progress. We must
2041 * initiate one now.
2042 *
2043 * While ptraced, a task may be resumed while group stop is
2044 * still in effect and then receive a stop signal and
2045 * initiate another group stop. This deviates from the
2046 * usual behavior as two consecutive stop signals can't
Oleg Nesterov780006eac2011-04-01 20:12:16 +02002047 * cause two group stops when !ptraced. That is why we
2048 * also check !task_is_stopped(t) below.
Tejun Heo408a37d2011-03-23 10:37:01 +01002049 *
2050 * The condition can be distinguished by testing whether
2051 * SIGNAL_STOP_STOPPED is already set. Don't generate
2052 * group_exit_code in such case.
2053 *
2054 * This is not necessary for SIGNAL_STOP_CONTINUED because
2055 * an intervening stop signal is required to cause two
2056 * continued events regardless of ptrace.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 */
Tejun Heo408a37d2011-03-23 10:37:01 +01002058 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2059 sig->group_exit_code = signr;
Oleg Nesterova122b342006-03-28 16:11:22 -08002060
Tejun Heo7dd3db52011-06-02 11:14:00 +02002061 sig->group_stop_count = 0;
2062
2063 if (task_set_jobctl_pending(current, signr | gstop))
2064 sig->group_stop_count++;
2065
Oleg Nesterov8d38f202014-01-23 15:55:56 -08002066 t = current;
2067 while_each_thread(current, t) {
Oleg Nesterova122b342006-03-28 16:11:22 -08002068 /*
2069 * Setting state to TASK_STOPPED for a group
2070 * stop is always done with the siglock held,
2071 * so this check has no races.
2072 */
Tejun Heo7dd3db52011-06-02 11:14:00 +02002073 if (!task_is_stopped(t) &&
2074 task_set_jobctl_pending(t, signr | gstop)) {
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002075 sig->group_stop_count++;
Tejun Heofb1d9102011-06-14 11:20:17 +02002076 if (likely(!(t->ptrace & PT_SEIZED)))
2077 signal_wake_up(t, 0);
2078 else
2079 ptrace_trap_notify(t);
Oleg Nesterova122b342006-03-28 16:11:22 -08002080 }
Tejun Heod79fdd62011-03-23 10:37:00 +01002081 }
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002082 }
Tejun Heo73ddff22011-06-14 11:20:14 +02002083
Tejun Heod21142e2011-06-17 16:50:34 +02002084 if (likely(!current->ptrace)) {
Tejun Heo5224fa32011-03-23 10:37:00 +01002085 int notify = 0;
2086
2087 /*
2088 * If there are no other threads in the group, or if there
2089 * is a group stop in progress and we are the last to stop,
2090 * report to the parent.
2091 */
2092 if (task_participate_group_stop(current))
2093 notify = CLD_STOPPED;
2094
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002095 __set_current_state(TASK_STOPPED);
Tejun Heo5224fa32011-03-23 10:37:00 +01002096 spin_unlock_irq(&current->sighand->siglock);
2097
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002098 /*
2099 * Notify the parent of the group stop completion. Because
2100 * we're not holding either the siglock or tasklist_lock
2101 * here, ptracer may attach inbetween; however, this is for
2102 * group stop and should always be delivered to the real
2103 * parent of the group leader. The new ptracer will get
2104 * its notification when this task transitions into
2105 * TASK_TRACED.
2106 */
Tejun Heo5224fa32011-03-23 10:37:00 +01002107 if (notify) {
2108 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002109 do_notify_parent_cldstop(current, false, notify);
Tejun Heo5224fa32011-03-23 10:37:00 +01002110 read_unlock(&tasklist_lock);
2111 }
2112
2113 /* Now we don't run again until woken by SIGCONT or SIGKILL */
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002114 freezable_schedule();
Tejun Heo73ddff22011-06-14 11:20:14 +02002115 return true;
Tejun Heod79fdd62011-03-23 10:37:00 +01002116 } else {
Tejun Heo73ddff22011-06-14 11:20:14 +02002117 /*
2118 * While ptraced, group stop is handled by STOP trap.
2119 * Schedule it and let the caller deal with it.
2120 */
2121 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2122 return false;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002123 }
Tejun Heo73ddff22011-06-14 11:20:14 +02002124}
Tejun Heod79fdd62011-03-23 10:37:00 +01002125
Tejun Heo73ddff22011-06-14 11:20:14 +02002126/**
2127 * do_jobctl_trap - take care of ptrace jobctl traps
2128 *
Tejun Heo3544d722011-06-14 11:20:15 +02002129 * When PT_SEIZED, it's used for both group stop and explicit
2130 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2131 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2132 * the stop signal; otherwise, %SIGTRAP.
2133 *
2134 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2135 * number as exit_code and no siginfo.
Tejun Heo73ddff22011-06-14 11:20:14 +02002136 *
2137 * CONTEXT:
2138 * Must be called with @current->sighand->siglock held, which may be
2139 * released and re-acquired before returning with intervening sleep.
2140 */
2141static void do_jobctl_trap(void)
2142{
Tejun Heo3544d722011-06-14 11:20:15 +02002143 struct signal_struct *signal = current->signal;
Tejun Heo73ddff22011-06-14 11:20:14 +02002144 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
Tejun Heod79fdd62011-03-23 10:37:00 +01002145
Tejun Heo3544d722011-06-14 11:20:15 +02002146 if (current->ptrace & PT_SEIZED) {
2147 if (!signal->group_stop_count &&
2148 !(signal->flags & SIGNAL_STOP_STOPPED))
2149 signr = SIGTRAP;
2150 WARN_ON_ONCE(!signr);
2151 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2152 CLD_STOPPED);
2153 } else {
2154 WARN_ON_ONCE(!signr);
2155 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002156 current->exit_code = 0;
2157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158}
2159
Al Viro94eb22d2012-11-05 13:08:06 -05002160static int ptrace_signal(int signr, siginfo_t *info)
Roland McGrath18c98b62008-04-17 18:44:38 -07002161{
Al Virob7f95912012-11-05 13:06:22 -05002162 ptrace_signal_deliver();
Oleg Nesterov8a352412011-07-21 17:06:53 +02002163 /*
2164 * We do not check sig_kernel_stop(signr) but set this marker
2165 * unconditionally because we do not know whether debugger will
2166 * change signr. This flag has no meaning unless we are going
2167 * to stop after return from ptrace_stop(). In this case it will
2168 * be checked in do_signal_stop(), we should only stop if it was
2169 * not cleared by SIGCONT while we were sleeping. See also the
2170 * comment in dequeue_signal().
2171 */
2172 current->jobctl |= JOBCTL_STOP_DEQUEUED;
Tejun Heofe1bc6a2011-03-23 10:37:00 +01002173 ptrace_stop(signr, CLD_TRAPPED, 0, info);
Roland McGrath18c98b62008-04-17 18:44:38 -07002174
2175 /* We're back. Did the debugger cancel the sig? */
2176 signr = current->exit_code;
2177 if (signr == 0)
2178 return signr;
2179
2180 current->exit_code = 0;
2181
Randy Dunlap5aba0852011-04-04 14:59:31 -07002182 /*
2183 * Update the siginfo structure if the signal has
2184 * changed. If the debugger wanted something
2185 * specific in the siginfo structure then it should
2186 * have updated *info via PTRACE_SETSIGINFO.
2187 */
Roland McGrath18c98b62008-04-17 18:44:38 -07002188 if (signr != info->si_signo) {
2189 info->si_signo = signr;
2190 info->si_errno = 0;
2191 info->si_code = SI_USER;
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08002192 rcu_read_lock();
Roland McGrath18c98b62008-04-17 18:44:38 -07002193 info->si_pid = task_pid_vnr(current->parent);
Eric W. Biederman54ba47e2012-03-13 16:04:35 -07002194 info->si_uid = from_kuid_munged(current_user_ns(),
2195 task_uid(current->parent));
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08002196 rcu_read_unlock();
Roland McGrath18c98b62008-04-17 18:44:38 -07002197 }
2198
2199 /* If the (new) signal is now blocked, requeue it. */
2200 if (sigismember(&current->blocked, signr)) {
2201 specific_send_sig_info(signr, info, current);
2202 signr = 0;
2203 }
2204
2205 return signr;
2206}
2207
Richard Weinberger828b1f62013-10-07 15:26:57 +02002208int get_signal(struct ksignal *ksig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209{
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002210 struct sighand_struct *sighand = current->sighand;
2211 struct signal_struct *signal = current->signal;
2212 int signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Oleg Nesterovf784e8a2012-08-26 21:12:17 +02002214 if (unlikely(current->task_works))
2215 task_work_run();
Al Viro72667022012-07-15 14:10:52 +04002216
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +05302217 if (unlikely(uprobe_deny_signal()))
2218 return 0;
2219
Roland McGrath13b1c3d2008-03-03 20:22:05 -08002220 /*
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002221 * Do this once, we can't return to user-mode if freezing() == T.
2222 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2223 * thus do not need another check after return.
Roland McGrath13b1c3d2008-03-03 20:22:05 -08002224 */
Rafael J. Wysockifc558a72006-03-23 03:00:05 -08002225 try_to_freeze();
2226
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002227relock:
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002228 spin_lock_irq(&sighand->siglock);
Oleg Nesterov021e1ae2008-04-30 00:53:00 -07002229 /*
2230 * Every stopped thread goes here after wakeup. Check to see if
2231 * we should notify the parent, prepare_signal(SIGCONT) encodes
2232 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2233 */
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002234 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
Tejun Heoc672af32011-03-23 10:36:59 +01002235 int why;
2236
2237 if (signal->flags & SIGNAL_CLD_CONTINUED)
2238 why = CLD_CONTINUED;
2239 else
2240 why = CLD_STOPPED;
2241
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002242 signal->flags &= ~SIGNAL_CLD_MASK;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002243
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002244 spin_unlock_irq(&sighand->siglock);
Oleg Nesterove4420552008-04-30 00:52:44 -07002245
Tejun Heoceb6bd62011-03-23 10:37:01 +01002246 /*
2247 * Notify the parent that we're continuing. This event is
2248 * always per-process and doesn't make whole lot of sense
2249 * for ptracers, who shouldn't consume the state via
2250 * wait(2) either, but, for backward compatibility, notify
2251 * the ptracer of the group leader too unless it's gonna be
2252 * a duplicate.
2253 */
Tejun Heoedf2ed12011-03-23 10:37:00 +01002254 read_lock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002255 do_notify_parent_cldstop(current, false, why);
2256
Oleg Nesterovbb3696d2011-06-24 17:34:23 +02002257 if (ptrace_reparented(current->group_leader))
2258 do_notify_parent_cldstop(current->group_leader,
2259 true, why);
Tejun Heoedf2ed12011-03-23 10:37:00 +01002260 read_unlock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002261
Oleg Nesterove4420552008-04-30 00:52:44 -07002262 goto relock;
2263 }
2264
Eric W. Biederman39beaea2019-02-06 18:39:40 -06002265 /* Has this task already been marked for death? */
Eric W. Biedermanaa74f262019-02-11 23:27:42 -06002266 if (signal_group_exit(signal)) {
2267 ksig->info.si_signo = signr = SIGKILL;
2268 sigdelset(&current->pending.signal, SIGKILL);
Zhenliang Wei9adcdd52019-05-31 22:30:52 -07002269 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2270 &sighand->action[SIGKILL - 1]);
Eric W. Biedermanaa74f262019-02-11 23:27:42 -06002271 recalc_sigpending();
Eric W. Biederman39beaea2019-02-06 18:39:40 -06002272 goto fatal;
Eric W. Biedermanaa74f262019-02-11 23:27:42 -06002273 }
Eric W. Biederman39beaea2019-02-06 18:39:40 -06002274
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 for (;;) {
2276 struct k_sigaction *ka;
Tejun Heodd1d6772011-06-02 11:14:00 +02002277
2278 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2279 do_signal_stop(0))
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002280 goto relock;
Oleg Nesterov1be53962009-12-15 16:47:26 -08002281
Tejun Heo73ddff22011-06-14 11:20:14 +02002282 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2283 do_jobctl_trap();
2284 spin_unlock_irq(&sighand->siglock);
2285 goto relock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
2287
Eric W. Biederman181f1f0d2019-02-06 17:51:47 -06002288 /*
2289 * Signals generated by the execution of an instruction
2290 * need to be delivered before any other pending signals
2291 * so that the instruction pointer in the signal stack
2292 * frame points to the faulting instruction.
2293 */
2294 signr = dequeue_synchronous_signal(&ksig->info);
2295 if (!signr)
2296 signr = dequeue_signal(current, &current->blocked, &ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Tejun Heodd1d6772011-06-02 11:14:00 +02002298 if (!signr)
2299 break; /* will return 0 */
2300
Oleg Nesterov8a352412011-07-21 17:06:53 +02002301 if (unlikely(current->ptrace) && signr != SIGKILL) {
Richard Weinberger828b1f62013-10-07 15:26:57 +02002302 signr = ptrace_signal(signr, &ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 if (!signr)
Tejun Heodd1d6772011-06-02 11:14:00 +02002304 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 }
2306
Tejun Heodd1d6772011-06-02 11:14:00 +02002307 ka = &sighand->action[signr-1];
2308
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05002309 /* Trace actually delivered signals. */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002310 trace_signal_deliver(signr, &ksig->info, ka);
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05002311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2313 continue;
2314 if (ka->sa.sa_handler != SIG_DFL) {
2315 /* Run the handler. */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002316 ksig->ka = *ka;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
2318 if (ka->sa.sa_flags & SA_ONESHOT)
2319 ka->sa.sa_handler = SIG_DFL;
2320
2321 break; /* will return non-zero "signr" value */
2322 }
2323
2324 /*
2325 * Now we are doing the default action for this signal.
2326 */
2327 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2328 continue;
2329
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002330 /*
Sukadev Bhattiprolu0fbc26a2007-10-18 23:40:13 -07002331 * Global init gets no signals it doesn't want.
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002332 * Container-init gets no signals it doesn't want from same
2333 * container.
2334 *
2335 * Note that if global/container-init sees a sig_kernel_only()
2336 * signal here, the signal must have been generated internally
2337 * or must have come from an ancestor namespace. In either
2338 * case, the signal cannot be dropped.
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002339 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -07002340 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002341 !sig_kernel_only(signr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 continue;
2343
2344 if (sig_kernel_stop(signr)) {
2345 /*
2346 * The default action is to stop all threads in
2347 * the thread group. The job control signals
2348 * do nothing in an orphaned pgrp, but SIGSTOP
2349 * always works. Note that siglock needs to be
2350 * dropped during the call to is_orphaned_pgrp()
2351 * because of lock ordering with tasklist_lock.
2352 * This allows an intervening SIGCONT to be posted.
2353 * We need to check for that and bail out if necessary.
2354 */
2355 if (signr != SIGSTOP) {
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002356 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357
2358 /* signals can be posted during this window */
2359
Eric W. Biederman3e7cd6c2007-02-12 00:52:58 -08002360 if (is_current_pgrp_orphaned())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 goto relock;
2362
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002363 spin_lock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 }
2365
Richard Weinberger828b1f62013-10-07 15:26:57 +02002366 if (likely(do_signal_stop(ksig->info.si_signo))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 /* It released the siglock. */
2368 goto relock;
2369 }
2370
2371 /*
2372 * We didn't actually stop, due to a race
2373 * with SIGCONT or something like that.
2374 */
2375 continue;
2376 }
2377
Eric W. Biederman39beaea2019-02-06 18:39:40 -06002378 fatal:
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002379 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380
2381 /*
2382 * Anything else is fatal, maybe with a core dump.
2383 */
2384 current->flags |= PF_SIGNALED;
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002385
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 if (sig_kernel_coredump(signr)) {
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002387 if (print_fatal_signals)
Richard Weinberger828b1f62013-10-07 15:26:57 +02002388 print_fatal_signal(ksig->info.si_signo);
Jesper Derehag2b5faa42013-03-19 20:50:05 +00002389 proc_coredump_connector(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 /*
2391 * If it was able to dump core, this kills all
2392 * other threads in the group and synchronizes with
2393 * their demise. If we lost the race with another
2394 * thread getting here, it set group_exit_code
2395 * first and our do_group_exit call below will use
2396 * that value and ignore the one we pass it.
2397 */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002398 do_coredump(&ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 }
2400
2401 /*
2402 * Death signals, no core dump.
2403 */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002404 do_group_exit(ksig->info.si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 /* NOTREACHED */
2406 }
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002407 spin_unlock_irq(&sighand->siglock);
Richard Weinberger828b1f62013-10-07 15:26:57 +02002408
2409 ksig->sig = signr;
2410 return ksig->sig > 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411}
2412
Matt Fleming5e6292c2012-01-10 15:11:17 -08002413/**
Al Viroefee9842012-04-28 02:04:15 -04002414 * signal_delivered -
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002415 * @ksig: kernel signal struct
Al Viroefee9842012-04-28 02:04:15 -04002416 * @stepping: nonzero if debugger single-step or block-step in use
Matt Fleming5e6292c2012-01-10 15:11:17 -08002417 *
Masanari Iidae2278672014-02-18 22:54:36 +09002418 * This function should be called when a signal has successfully been
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002419 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
Al Viroefee9842012-04-28 02:04:15 -04002420 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002421 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
Matt Fleming5e6292c2012-01-10 15:11:17 -08002422 */
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002423static void signal_delivered(struct ksignal *ksig, int stepping)
Matt Fleming5e6292c2012-01-10 15:11:17 -08002424{
2425 sigset_t blocked;
2426
Al Viroa610d6e2012-05-21 23:42:15 -04002427 /* A signal was successfully delivered, and the
2428 saved sigmask was stored on the signal frame,
2429 and will be restored by sigreturn. So we can
2430 simply clear the restore sigmask flag. */
2431 clear_restore_sigmask();
2432
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002433 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2434 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2435 sigaddset(&blocked, ksig->sig);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002436 set_current_blocked(&blocked);
Richard Weinbergerdf5601f2013-10-07 15:37:19 +02002437 tracehook_signal_handler(stepping);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002438}
2439
Al Viro2ce5da12012-11-07 15:11:25 -05002440void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2441{
2442 if (failed)
2443 force_sigsegv(ksig->sig, current);
2444 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002445 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002446}
2447
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002448/*
2449 * It could be that complete_signal() picked us to notify about the
Oleg Nesterovfec99932011-04-27 19:50:21 +02002450 * group-wide signal. Other threads should be notified now to take
2451 * the shared signals in @which since we will not.
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002452 */
Oleg Nesterovf646e222011-04-27 19:18:39 +02002453static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002454{
Oleg Nesterovf646e222011-04-27 19:18:39 +02002455 sigset_t retarget;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002456 struct task_struct *t;
2457
Oleg Nesterovf646e222011-04-27 19:18:39 +02002458 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2459 if (sigisemptyset(&retarget))
2460 return;
2461
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002462 t = tsk;
2463 while_each_thread(tsk, t) {
Oleg Nesterovfec99932011-04-27 19:50:21 +02002464 if (t->flags & PF_EXITING)
2465 continue;
2466
2467 if (!has_pending_signals(&retarget, &t->blocked))
2468 continue;
2469 /* Remove the signals this thread can handle. */
2470 sigandsets(&retarget, &retarget, &t->blocked);
2471
2472 if (!signal_pending(t))
2473 signal_wake_up(t, 0);
2474
2475 if (sigisemptyset(&retarget))
2476 break;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002477 }
2478}
2479
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002480void exit_signals(struct task_struct *tsk)
2481{
2482 int group_stop = 0;
Oleg Nesterovf646e222011-04-27 19:18:39 +02002483 sigset_t unblocked;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002484
Tejun Heo77e4ef92011-12-12 18:12:21 -08002485 /*
2486 * @tsk is about to have PF_EXITING set - lock out users which
2487 * expect stable threadgroup.
2488 */
2489 threadgroup_change_begin(tsk);
2490
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002491 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2492 tsk->flags |= PF_EXITING;
Tejun Heo77e4ef92011-12-12 18:12:21 -08002493 threadgroup_change_end(tsk);
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002494 return;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002495 }
2496
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002497 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002498 /*
2499 * From now this task is not visible for group-wide signals,
2500 * see wants_signal(), do_signal_stop().
2501 */
2502 tsk->flags |= PF_EXITING;
Tejun Heo77e4ef92011-12-12 18:12:21 -08002503
2504 threadgroup_change_end(tsk);
2505
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002506 if (!signal_pending(tsk))
2507 goto out;
2508
Oleg Nesterovf646e222011-04-27 19:18:39 +02002509 unblocked = tsk->blocked;
2510 signotset(&unblocked);
2511 retarget_shared_pending(tsk, &unblocked);
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002512
Tejun Heoa8f072c2011-06-02 11:13:59 +02002513 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
Tejun Heoe5c19022011-03-23 10:37:00 +01002514 task_participate_group_stop(tsk))
Tejun Heoedf2ed12011-03-23 10:37:00 +01002515 group_stop = CLD_STOPPED;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002516out:
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002517 spin_unlock_irq(&tsk->sighand->siglock);
2518
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002519 /*
2520 * If group stop has completed, deliver the notification. This
2521 * should always go to the real parent of the group leader.
2522 */
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002523 if (unlikely(group_stop)) {
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002524 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002525 do_notify_parent_cldstop(tsk, false, group_stop);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002526 read_unlock(&tasklist_lock);
2527 }
2528}
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530EXPORT_SYMBOL(recalc_sigpending);
2531EXPORT_SYMBOL_GPL(dequeue_signal);
2532EXPORT_SYMBOL(flush_signals);
2533EXPORT_SYMBOL(force_sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534EXPORT_SYMBOL(send_sig);
2535EXPORT_SYMBOL(send_sig_info);
2536EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
2538/*
2539 * System call entry points.
2540 */
2541
Randy Dunlap41c57892011-04-04 15:00:26 -07002542/**
2543 * sys_restart_syscall - restart a system call
2544 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002545SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002547 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 return restart->fn(restart);
2549}
2550
/* restart_block callback that refuses to restart: always -EINTR */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2555
Oleg Nesterovb1828012011-04-27 21:56:14 +02002556static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2557{
2558 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2559 sigset_t newblocked;
2560 /* A set of now blocked but previously unblocked signals. */
Oleg Nesterov702a5072011-04-27 22:01:27 +02002561 sigandnsets(&newblocked, newset, &current->blocked);
Oleg Nesterovb1828012011-04-27 21:56:14 +02002562 retarget_shared_pending(tsk, &newblocked);
2563 }
2564 tsk->blocked = *newset;
2565 recalc_sigpending();
2566}
2567
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002568/**
2569 * set_current_blocked - change current->blocked mask
2570 * @newset: new mask
2571 *
2572 * It is wrong to change ->blocked directly, this helper should be used
2573 * to ensure the process can't miss a shared signal we are going to block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 */
Al Viro77097ae2012-04-27 13:58:59 -04002575void set_current_blocked(sigset_t *newset)
2576{
Al Viro77097ae2012-04-27 13:58:59 -04002577 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01002578 __set_current_blocked(newset);
Al Viro77097ae2012-04-27 13:58:59 -04002579}
2580
/*
 * Like set_current_blocked() but does not strip SIGKILL/SIGSTOP;
 * takes the siglock around the actual update.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
2597/*
2598 * This is also useful for kernel threads that want to temporarily
2599 * (or permanently) block certain signals.
2600 *
2601 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2602 * interface happily blocks "unblockable" signals like SIGKILL
2603 * and friends.
2604 */
2605int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2606{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002607 struct task_struct *tsk = current;
2608 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002610 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002611 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002612 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 switch (how) {
2615 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002616 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 break;
2618 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002619 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 break;
2621 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002622 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 break;
2624 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002625 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002627
Al Viro77097ae2012-04-27 13:58:59 -04002628 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002629 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630}
2631
Randy Dunlap41c57892011-04-04 15:00:26 -07002632/**
2633 * sys_rt_sigprocmask - change the list of currently blocked signals
2634 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07002635 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002636 * @oset: previous value of signal mask if non-null
2637 * @sigsetsize: size of sigset_t type
2638 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002639SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002640 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002643 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
2645 /* XXX: Don't preclude handling different sized sigset_t's. */
2646 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002647 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002649 old_set = current->blocked;
2650
2651 if (nset) {
2652 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2653 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2655
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002656 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002658 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002660
2661 if (oset) {
2662 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2663 return -EFAULT;
2664 }
2665
2666 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667}
2668
Al Viro322a56c2012-12-25 13:32:58 -05002669#ifdef CONFIG_COMPAT
Al Viro322a56c2012-12-25 13:32:58 -05002670COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2671 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672{
Al Viro322a56c2012-12-25 13:32:58 -05002673#ifdef __BIG_ENDIAN
2674 sigset_t old_set = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
Al Viro322a56c2012-12-25 13:32:58 -05002676 /* XXX: Don't preclude handling different sized sigset_t's. */
2677 if (sigsetsize != sizeof(sigset_t))
2678 return -EINVAL;
2679
2680 if (nset) {
2681 compat_sigset_t new32;
2682 sigset_t new_set;
2683 int error;
2684 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2685 return -EFAULT;
2686
2687 sigset_from_compat(&new_set, &new32);
2688 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2689
2690 error = sigprocmask(how, &new_set, NULL);
2691 if (error)
2692 return error;
2693 }
2694 if (oset) {
2695 compat_sigset_t old32;
2696 sigset_to_compat(&old32, &old_set);
Al Virodb61ec22013-03-02 20:39:15 -05002697 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
Al Viro322a56c2012-12-25 13:32:58 -05002698 return -EFAULT;
2699 }
2700 return 0;
2701#else
2702 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2703 (sigset_t __user *)oset, sigsetsize);
2704#endif
2705}
2706#endif
Al Viro322a56c2012-12-25 13:32:58 -05002707
Al Virofe9c1db2012-12-25 14:31:38 -05002708static int do_sigpending(void *set, unsigned long sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 if (sigsetsize > sizeof(sigset_t))
Al Virofe9c1db2012-12-25 14:31:38 -05002711 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
2713 spin_lock_irq(&current->sighand->siglock);
Al Virofe9c1db2012-12-25 14:31:38 -05002714 sigorsets(set, &current->pending.signal,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 &current->signal->shared_pending.signal);
2716 spin_unlock_irq(&current->sighand->siglock);
2717
2718 /* Outside the lock because only this thread touches it. */
Al Virofe9c1db2012-12-25 14:31:38 -05002719 sigandsets(set, &current->blocked, set);
2720 return 0;
Randy Dunlap5aba0852011-04-04 14:59:31 -07002721}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
Randy Dunlap41c57892011-04-04 15:00:26 -07002723/**
2724 * sys_rt_sigpending - examine a pending signal that has been raised
2725 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002726 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002727 * @sigsetsize: size of sigset_t type or larger
2728 */
Al Virofe9c1db2012-12-25 14:31:38 -05002729SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730{
Al Virofe9c1db2012-12-25 14:31:38 -05002731 sigset_t set;
2732 int err = do_sigpending(&set, sigsetsize);
2733 if (!err && copy_to_user(uset, &set, sigsetsize))
2734 err = -EFAULT;
2735 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736}
2737
Al Virofe9c1db2012-12-25 14:31:38 -05002738#ifdef CONFIG_COMPAT
Al Virofe9c1db2012-12-25 14:31:38 -05002739COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2740 compat_size_t, sigsetsize)
2741{
2742#ifdef __BIG_ENDIAN
2743 sigset_t set;
2744 int err = do_sigpending(&set, sigsetsize);
2745 if (!err) {
2746 compat_sigset_t set32;
2747 sigset_to_compat(&set32, &set);
2748 /* we can get here only if sigsetsize <= sizeof(set) */
2749 if (copy_to_user(uset, &set32, sigsetsize))
2750 err = -EFAULT;
2751 }
2752 return err;
2753#else
2754 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2755#endif
2756}
2757#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002758
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2760
/*
 * Copy a siginfo_t to userspace field by field.
 *
 * A negative si_code marks kernel-internal info whose union layout is
 * already exactly what userspace expects, so it is copied wholesale.
 * Otherwise only the three generic ints plus the union member selected
 * by (si_code & __SI_MASK) are copied, so that kernel stack padding is
 * never leaked to userspace.
 *
 * Returns 0 on success, -EFAULT on any failed user access.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		/* MPX bound-violation faults also carry the bound range. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		/* Protection-key faults carry the offending pkey. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2848
2849#endif
2850
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 *
 * Returns the dequeued signal number on success; otherwise -EINVAL for
 * an invalid @ts, -EINTR if the sleep was interrupted, or -EAGAIN if
 * the timeout expired (or was zero) with no signal available.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	/* A zero timeout means poll only: don't sleep if nothing is queued. */
	if (!sig && timeout.tv64) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/* Restore the original blocked mask and try one last dequeue. */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
2906
2907/**
Randy Dunlap41c57892011-04-04 15:00:26 -07002908 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2909 * in @uthese
2910 * @uthese: queued signals to wait for
2911 * @uinfo: if non-null, the signal's siginfo is returned here
2912 * @uts: upper bound on process time suspension
2913 * @sigsetsize: size of sigset_t type
2914 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002915SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2916 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2917 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 sigset_t these;
2920 struct timespec ts;
2921 siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02002922 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923
2924 /* XXX: Don't preclude handling different sized sigset_t's. */
2925 if (sigsetsize != sizeof(sigset_t))
2926 return -EINVAL;
2927
2928 if (copy_from_user(&these, uthese, sizeof(these)))
2929 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07002930
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 if (uts) {
2932 if (copy_from_user(&ts, uts, sizeof(ts)))
2933 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 }
2935
Oleg Nesterov943df142011-04-27 21:44:14 +02002936 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
Oleg Nesterov943df142011-04-27 21:44:14 +02002938 if (ret > 0 && uinfo) {
2939 if (copy_siginfo_to_user(uinfo, &info))
2940 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 }
2942
2943 return ret;
2944}
2945
/*
 * Fill @info as for a kill() issued by the current task: SI_USER origin,
 * with the sender's tgid and uid translated via the current task's pid
 * and user namespaces.
 */
static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
{
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}
2954
Randy Dunlap41c57892011-04-04 15:00:26 -07002955/**
2956 * sys_kill - send a signal to a process
2957 * @pid: the PID of the process
2958 * @sig: signal to be sent
2959 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002960SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961{
2962 struct siginfo info;
2963
Christian Braunercf9f8292018-11-19 00:51:56 +01002964 prepare_kill_siginfo(sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965
2966 return kill_something_info(sig, &info, pid);
2967}
2968
Christian Braunercf9f8292018-11-19 00:51:56 +01002969/*
2970 * Verify that the signaler and signalee either are in the same pid namespace
2971 * or that the signaler's pid namespace is an ancestor of the signalee's pid
2972 * namespace.
2973 */
2974static bool access_pidfd_pidns(struct pid *pid)
2975{
2976 struct pid_namespace *active = task_active_pid_ns(current);
2977 struct pid_namespace *p = ns_of_pid(pid);
2978
2979 for (;;) {
2980 if (!p)
2981 return false;
2982 if (p == active)
2983 break;
2984 p = p->parent;
2985 }
2986
2987 return true;
2988}
2989
Christian Braunerb3ae5982019-04-17 22:50:25 +02002990static struct pid *pidfd_to_pid(const struct file *file)
2991{
2992 if (file->f_op == &pidfd_fops)
2993 return file->private_data;
2994
2995 return tgid_pidfd_to_pid(file);
2996}
2997
Christian Braunercf9f8292018-11-19 00:51:56 +01002998static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
2999{
3000#ifdef CONFIG_COMPAT
3001 /*
3002 * Avoid hooking up compat syscalls and instead handle necessary
3003 * conversions here. Note, this is a stop-gap measure and should not be
3004 * considered a generic solution.
3005 */
3006 if (in_compat_syscall())
3007 return copy_siginfo_from_user32(
3008 kinfo, (struct compat_siginfo __user *)info);
3009#endif
3010 return copy_from_user(kinfo, info, sizeof(siginfo_t));
3011}
3012
/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd: file descriptor of the process
 * @sig: signal to send
 * @info: signal info
 * @flags: future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>. It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	/* The target must live in our pid namespace or a descendant of it. */
	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		/* The siginfo's signal number must match the one requested. */
		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		/* No siginfo supplied: behave like a plain kill(). */
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
Christian Braunercf9f8292018-11-19 00:51:56 +01003082
/*
 * Deliver @sig with @info to the single task identified by @pid, after a
 * permission check. When @tgid > 0 the task must also belong to that
 * thread group (tkill vs tgkill semantics). Returns 0 on success,
 * -ESRCH if no matching task exists, or the permission-check error.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU keeps the looked-up task valid without taking tasklist_lock. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
3112
/*
 * Common backend for tkill()/tgkill(): build an SI_TKILL siginfo naming
 * the current task as sender and deliver it to one specific task.
 * The struct is zero-initialized so no undefined padding or unused
 * union bytes can reach userspace.
 */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
3125
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126/**
3127 * sys_tgkill - send signal to one specific thread
3128 * @tgid: the thread group ID of the thread
3129 * @pid: the PID of the thread
3130 * @sig: signal to be sent
3131 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003132 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 * exists but it's not belonging to the target process anymore. This
3134 * method solves the problem of threads exiting and PIDs getting reused.
3135 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003136SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 /* This is only valid for single tasks */
3139 if (pid <= 0 || tgid <= 0)
3140 return -EINVAL;
3141
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003142 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143}
3144
Randy Dunlap41c57892011-04-04 15:00:26 -07003145/**
3146 * sys_tkill - send signal to one specific task
3147 * @pid: the PID of the task
3148 * @sig: signal to be sent
3149 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3151 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003152SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 /* This is only valid for single tasks */
3155 if (pid <= 0)
3156 return -EINVAL;
3157
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003158 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159}
3160
Al Viro75907d42012-12-25 15:19:12 -05003161static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3162{
3163 /* Not even root can pretend to send signals from the kernel.
3164 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3165 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003166 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003167 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003168 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003169
Al Viro75907d42012-12-25 15:19:12 -05003170 info->si_signo = sig;
3171
3172 /* POSIX.1b doesn't mention process groups. */
3173 return kill_proc_info(sig, info, pid);
3174}
3175
Randy Dunlap41c57892011-04-04 15:00:26 -07003176/**
3177 * sys_rt_sigqueueinfo - send signal information to a signal
3178 * @pid: the PID of the thread
3179 * @sig: signal to be sent
3180 * @uinfo: signal info to be sent
3181 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003182SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3183 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184{
3185 siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3187 return -EFAULT;
Al Viro75907d42012-12-25 15:19:12 -05003188 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189}
3190
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init so no stale stack bytes survive the 32->64 conversion. */
	siginfo_t info = {};
	int err;

	err = copy_siginfo_from_user32(&info, uinfo);
	if (err)
		return err;

	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
Al Viro75907d42012-12-25 15:19:12 -05003204
Al Viro9aae8fc2012-12-24 23:12:04 -05003205static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003206{
3207 /* This is only valid for single tasks */
3208 if (pid <= 0 || tgid <= 0)
3209 return -EINVAL;
3210
3211 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07003212 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3213 */
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003214 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3215 (task_pid_vnr(current) != pid))
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003216 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003217
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003218 info->si_signo = sig;
3219
3220 return do_send_specific(tgid, pid, sig, info);
3221}
3222
3223SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3224 siginfo_t __user *, uinfo)
3225{
3226 siginfo_t info;
3227
3228 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3229 return -EFAULT;
3230
3231 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3232}
3233
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init so no stale stack bytes survive the 32->64 conversion. */
	siginfo_t info = {};
	int err;

	err = copy_siginfo_from_user32(&info, uinfo);
	if (err)
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3248
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 *
 * Installs @action as the handler for @sig on the current task.  When
 * the signal is being set to SIG_IGN, any already-queued instances are
 * flushed from both the shared and private pending queues.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		/* Ignoring the signal discards anything already queued. */
		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003269
/*
 * Weak no-op default; architectures that need to tweak compat sigaction
 * ABI details provide their own strong definition.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3274
/*
 * Examine and/or change the action for signal @sig on the current task.
 *
 * @sig:  signal number; must be valid and not SIGKILL/SIGSTOP when @act
 *        is non-NULL (those cannot be caught or ignored)
 * @act:  if non-NULL, the new action to install
 * @oact: if non-NULL, receives the previous action
 *
 * Returns 0 on success or -EINVAL for a bad signal number.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Give the architecture a chance to adjust compat handling. */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP may never be masked by a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Drop queued instances from every thread in the group. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3319
/*
 * Core of sigaltstack(): optionally install a new alternate signal
 * stack from @uss and/or report the old one through @uoss.
 *
 * @sp: the caller's current user stack pointer, used both to report
 *      whether we are presently on the alternate stack and to refuse
 *      changing it while in use (-EPERM)
 * @min_ss_size: minimum acceptable stack size (MINSIGSTKSZ or the
 *      compat equivalent); smaller requests get -ENOMEM
 *
 * Note the old state is snapshotted into @oss before any modification,
 * so the caller always gets the pre-call values back.
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
		size_t min_ss_size)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Cannot replace the alternate stack while running on it. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		/* Only the mode bits below SS_FLAG_BITS select behaviour. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
Al Viro6bf9adf2012-12-14 14:09:47 -05003383SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3384{
Will Deacon1e7066a2018-09-05 15:34:42 +01003385 return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
3386 MINSIGSTKSZ);
Al Viro6bf9adf2012-12-14 14:09:47 -05003387}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388
Al Viro5c495742012-11-18 15:29:16 -05003389int restore_altstack(const stack_t __user *uss)
3390{
Will Deacon1e7066a2018-09-05 15:34:42 +01003391 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
3392 MINSIGSTKSZ);
Al Viro5c495742012-11-18 15:29:16 -05003393 /* squash all but EFAULT for now */
3394 return err == -EFAULT ? err : 0;
3395}
3396
Al Viroc40702c2012-11-20 14:24:26 -05003397int __save_altstack(stack_t __user *uss, unsigned long sp)
3398{
3399 struct task_struct *t = current;
Stas Sergeev2a742132016-04-14 23:20:04 +03003400 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3401 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003402 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev2a742132016-04-14 23:20:04 +03003403 if (err)
3404 return err;
3405 if (t->sas_ss_flags & SS_AUTODISARM)
3406 sas_ss_reset(t);
3407 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003408}
3409
Al Viro90268432012-12-14 14:47:53 -05003410#ifdef CONFIG_COMPAT
/*
 * Compat sigaltstack: widen the 32-bit stack_t, run the native helper
 * with KERNEL_DS so its user-access macros accept our kernel-space
 * temporaries, then narrow the result back for userspace.
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3445
3446int compat_restore_altstack(const compat_stack_t __user *uss)
3447{
3448 int err = compat_sys_sigaltstack(uss, NULL);
3449 /* squash all but -EFAULT for now */
3450 return err == -EFAULT ? err : 0;
3451}
Al Viroc40702c2012-11-20 14:24:26 -05003452
3453int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3454{
Stas Sergeev6d94a6b2017-02-27 14:27:25 -08003455 int err;
Al Viroc40702c2012-11-20 14:24:26 -05003456 struct task_struct *t = current;
Stas Sergeev6d94a6b2017-02-27 14:27:25 -08003457 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3458 &uss->ss_sp) |
3459 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003460 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev6d94a6b2017-02-27 14:27:25 -08003461 if (err)
3462 return err;
3463 if (t->sas_ss_flags & SS_AUTODISARM)
3464 sas_ss_reset(t);
3465 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003466}
Al Viro90268432012-12-14 14:47:53 -05003467#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468
3469#ifdef __ARCH_WANT_SYS_SIGPENDING
3470
/**
 * sys_sigpending - examine pending signals
 * @set: where mask of pending signal is returned
 *
 * Legacy entry point that reports only the first word of the pending set,
 * implemented by forwarding to sys_rt_sigpending() with a reduced
 * sigsetsize.  NOTE(review): the (sigset_t __user *) cast assumes the low
 * word of the set occupies the first sizeof(old_sigset_t) bytes copied
 * out — worth confirming on big-endian 64-bit configurations.
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}
3479
3480#endif
3481
3482#ifdef __ARCH_WANT_SYS_SIGPROCMASK
Randy Dunlap41c57892011-04-04 15:00:26 -07003483/**
3484 * sys_sigprocmask - examine and change blocked signals
3485 * @how: whether to add, remove, or set signals
Oleg Nesterovb013c392011-04-28 11:36:20 +02003486 * @nset: signals to add or remove (if non-null)
Randy Dunlap41c57892011-04-04 15:00:26 -07003487 * @oset: previous value of signal mask if non-null
3488 *
Randy Dunlap5aba0852011-04-04 14:59:31 -07003489 * Some platforms have their own version with special arguments;
3490 * others support only sys_rt_sigprocmask.
3491 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
Oleg Nesterovb013c392011-04-28 11:36:20 +02003493SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
Heiko Carstensb290ebe2009-01-14 14:14:06 +01003494 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 old_sigset_t old_set, new_set;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003497 sigset_t new_blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
Oleg Nesterovb013c392011-04-28 11:36:20 +02003499 old_set = current->blocked.sig[0];
3500
3501 if (nset) {
3502 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3503 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003505 new_blocked = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 switch (how) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 case SIG_BLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003509 sigaddsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 break;
3511 case SIG_UNBLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003512 sigdelsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 break;
3514 case SIG_SETMASK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003515 new_blocked.sig[0] = new_set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 break;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003517 default:
3518 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 }
3520
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01003521 set_current_blocked(&new_blocked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 }
Oleg Nesterovb013c392011-04-28 11:36:20 +02003523
3524 if (oset) {
3525 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3526 return -EFAULT;
3527 }
3528
3529 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530}
3531#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3532
Al Viroeaca6ea2012-11-25 23:12:10 -05003533#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003534/**
3535 * sys_rt_sigaction - alter an action taken by a process
3536 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003537 * @act: new sigaction
3538 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003539 * @sigsetsize: size of sigset_t type
3540 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003541SYSCALL_DEFINE4(rt_sigaction, int, sig,
3542 const struct sigaction __user *, act,
3543 struct sigaction __user *, oact,
3544 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545{
3546 struct k_sigaction new_sa, old_sa;
3547 int ret = -EINVAL;
3548
3549 /* XXX: Don't preclude handling different sized sigset_t's. */
3550 if (sigsetsize != sizeof(sigset_t))
3551 goto out;
3552
3553 if (act) {
3554 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3555 return -EFAULT;
3556 }
3557
3558 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3559
3560 if (!ret && oact) {
3561 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3562 return -EFAULT;
3563 }
3564out:
3565 return ret;
3566}
Al Viro08d32fe2012-12-25 18:38:15 -05003567#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigaction: translates 32-bit pointers/sigsets to and from the
 * native k_sigaction around do_sigaction().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* Accumulate uaccess failures; any non-zero ret => -EFAULT. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		/* Widen the 32-bit mask into the native sigset. */
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Narrow the old action back to compat layout for userspace. */
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3613#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003614#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615
Al Viro495dfbf2012-12-25 19:09:45 -05003616#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2): old_sigaction layout with a single-word sa_mask.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* One access_ok() check covers the unchecked __get_user()s. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Promote the one-word legacy mask to a full sigset. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the low word of the old mask fits the legacy ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3651#endif
3652#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat legacy sigaction(2): compat_old_sigaction layout with 32-bit
 * handler/restorer pointers and a single-word mask.
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok() check covers the unchecked __get_user()s. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Widen compat pointers/mask into the native k_sigaction. */
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Narrow the old action back to the compat legacy layout. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3692#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693
Fabian Frederickf6187762014-06-04 16:11:12 -07003694#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 * Only the low word of the blocked set is visible through this ABI.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe: a plain read of current's own blocked set. */
	return current->blocked.sig[0];
}
3704
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003705SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003707 int old = current->blocked.sig[0];
3708 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003710 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003711 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712
3713 return old;
3714}
Fabian Frederickf6187762014-06-04 16:11:12 -07003715#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716
3717#ifdef __ARCH_WANT_SYS_SIGNAL
3718/*
3719 * For backwards compatibility. Functionality superseded by sigaction.
3720 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003721SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722{
3723 struct k_sigaction new_sa, old_sa;
3724 int ret;
3725
3726 new_sa.sa.sa_handler = handler;
3727 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003728 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729
3730 ret = do_sigaction(sig, &new_sa, &old_sa);
3731
3732 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3733}
3734#endif /* __ARCH_WANT_SYS_SIGNAL */
3735
3736#ifdef __ARCH_WANT_SYS_PAUSE
3737
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003738SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003740 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003741 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003742 schedule();
3743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 return -ERESTARTNOHAND;
3745}
3746
3747#endif
3748
/*
 * Common helper for the sigsuspend family: atomically replace the blocked
 * mask with @set, sleep until a signal is pending, and flag the saved mask
 * for restoration on the signal-delivery path.
 */
static int sigsuspend(sigset_t *set)
{
	/* Stash the old mask; set_restore_sigmask() below arranges for it
	 * to be put back once the signal has been taken. */
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04003761
Randy Dunlap41c57892011-04-04 15:00:26 -07003762/**
3763 * sys_rt_sigsuspend - replace the signal mask for a value with the
3764 * @unewset value until a signal is received
3765 * @unewset: new signal mask value
3766 * @sigsetsize: size of sigset_t type
3767 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003768SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003769{
3770 sigset_t newset;
3771
3772 /* XXX: Don't preclude handling different sized sigset_t's. */
3773 if (sigsetsize != sizeof(sigset_t))
3774 return -EINVAL;
3775
3776 if (copy_from_user(&newset, unewset, sizeof(newset)))
3777 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003778 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003779}
Al Viroad4b65a2012-12-24 21:43:56 -05003780
3781#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigsuspend.  On big-endian the compat bitmap layout differs,
 * so the mask is explicitly widened; little-endian can reuse the native
 * syscall directly.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	/* NOTE(review): compares against native sizeof(sigset_t) rather than
	 * sizeof(compat_sigset_t); the two match on current configs, but
	 * worth confirming. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3801#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003802
Al Viro0a0e8cd2012-12-25 16:04:12 -05003803#ifdef CONFIG_OLD_SIGSUSPEND
3804SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3805{
3806 sigset_t blocked;
3807 siginitset(&blocked, mask);
3808 return sigsuspend(&blocked);
3809}
3810#endif
3811#ifdef CONFIG_OLD_SIGSUSPEND3
/* Three-argument ABI flavour; @unused1 and @unused2 are ignored. */
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
3818#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819
/*
 * Weak default for arch_vma_name(): architectures that want to name
 * special mappings override this; the generic version reports none.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3824
/*
 * Boot-time setup for the signal subsystem: sanity-check the siginfo
 * layout and create the sigqueue slab cache.
 */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	/* SLAB_PANIC: the kernel cannot run without this cache. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003833
3834#ifdef CONFIG_KGDB_KDB
3835#include <linux/kdb.h>
3836/*
3837 * kdb_send_sig_info - Allows kdb to send signals without exposing
3838 * signal internals. This function checks if the required locks are
3839 * available before calling the main signal code, to avoid kdb
3840 * deadlocks.
3841 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	/* Remembers the last target so the RUNNING warning fires only once
	 * per task; a repeated kill proceeds despite the risk. */
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/* Probe the siglock: if it is held elsewhere we cannot safely take
	 * the signal paths from the debugger context. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	/* Dropped again immediately — this was only an availability check. */
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3872#endif /* CONFIG_KGDB_KDB */