blob: 1a75001caa6c3347e081ae304b4d19e405e9497a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
Christian Brauner6dc8e7c2018-11-19 00:51:56 +010017#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/fs.h>
Christian Brauner6dc8e7c2018-11-19 00:51:56 +010019#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/tty.h>
21#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070022#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/security.h>
24#include <linux/syscalls.h>
25#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070026#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070027#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090028#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070029#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080030#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080031#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080032#include <linux/pid_namespace.h>
33#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080034#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053035#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050036#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000037#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070038#include <linux/compiler.h>
39
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050040#define CREATE_TRACE_POINTS
41#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080042
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <asm/param.h>
44#include <asm/uaccess.h>
45#include <asm/unistd.h>
46#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010047#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040048#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50/*
51 * SLAB caches for signal bits.
52 */
53
Christoph Lametere18b8902006-12-06 20:33:20 -080054static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090056int print_fatal_signals __read_mostly;
57
/* Return the userspace handler currently installed for @sig on task @t. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	/* action[] is 0-indexed while signal numbers start at 1. */
	return t->sighand->action[sig - 1].sa.sa_handler;
}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070062
Roland McGrath35de2542008-07-25 19:45:51 -070063static int sig_handler_ignored(void __user *handler, int sig)
64{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070065 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070066 return handler == SIG_IGN ||
67 (handler == SIG_DFL && sig_kernel_ignore(sig));
68}
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
/*
 * Would @sig be ignored by task @t based on its installed handler?
 * @force indicates the signal was sent in a way that cannot be blocked
 * or ignored (e.g. from the kernel itself).
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/*
	 * SIGNAL_UNKILLABLE tasks (e.g. the global init) treat SIG_DFL as
	 * "ignore", unless the signal is forced and is one that only the
	 * kernel may deliver (sig_kernel_only()).
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}
82
/*
 * Return nonzero if @sig, delivered to @t with @force semantics, would
 * currently be ignored and so need not be queued.
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
103
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/*
	 * The switch on the compile-time constant _NSIG_WORDS lets the
	 * common word counts run fully unrolled; only unusual sizes take
	 * the generic loop.
	 */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
133
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Re-evaluate whether @t has deliverable work (job control or unblocked
 * signals) and set TIF_SIGPENDING if so.  Returns 1 if the flag was set.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
151
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
161
/*
 * Recompute TIF_SIGPENDING for current.  Unlike the _tsk helper this may
 * clear the flag: that is safe only on current, and is skipped while the
 * task is freezing (the freezer relies on the pending flag to interrupt it).
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
168
/* Given the mask, find the first available signal that should be serviced. */

/* Signals whose default cause is a synchronous CPU fault; they get priority. */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

/*
 * Return the lowest-priority deliverable signal number in @pending that
 * is not blocked by @mask, or 0 if none.  Synchronous (fault) signals in
 * the first word are preferred over everything else.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
220
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900221static inline void print_dropped_signal(int sig)
222{
223 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
224
225 if (!print_fatal_signals)
226 return;
227
228 if (!__ratelimit(&ratelimit_state))
229 return;
230
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700231 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900232 current->comm, current->pid, sig);
233}
234
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set the bits in @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	/* TRAPPING only makes sense together with a pending stop/trap bit. */
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces any previously recorded one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
267
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
288
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* Dropping the stop also invalidates its consume/dequeued state. */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
316
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* Sample the consume bit before task_clear_jobctl_pending() drops it. */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
358
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * The record is charged against @t's user's RLIMIT_SIGPENDING unless
 * @override_rlimit is set.  Returns NULL on allocation failure or when the
 * rlimit is exceeded; on success the caller owns the returned sigqueue.
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	/* Charge up front; undone below if the allocation does not happen. */
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* Roll back the charge and the uid reference taken above. */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;		/* q keeps the uid reference */
	}

	return q;
}
398
Andrew Morton514a01b2006-02-03 03:04:41 -0800399static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400{
401 if (q->flags & SIGQUEUE_PREALLOC)
402 return;
403 atomic_dec(&q->user->sigpending);
404 free_uid(q->user);
405 kmem_cache_free(sigqueue_cachep, q);
406}
407
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800408void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
410 struct sigqueue *q;
411
412 sigemptyset(&queue->signal);
413 while (!list_empty(&queue->list)) {
414 q = list_entry(queue->list.next, struct sigqueue , list);
415 list_del_init(&q->list);
416 __sigqueue_free(q);
417 }
418}
419
/*
 * Flush all pending signals for this kthread.
 *
 * Clears TIF_SIGPENDING and discards both the private and the shared
 * pending queues under @t's siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
433
/*
 * Remove all SI_TIMER entries from @pending while preserving any other
 * queued instances of the same signal numbers.
 *
 * @signal tracks the bits to drop, @retain the bits that must survive
 * because a non-timer entry for that signal is still queued; the final
 * pending set is the union of the two.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
456
/*
 * Flush all queued itimer (SI_TIMER) signals for the current task, from
 * both its private and shared pending queues, under the siglock.
 */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
467
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700468void ignore_signals(struct task_struct *t)
469{
470 int i;
471
472 for (i = 0; i < _NSIG; ++i)
473 t->sighand->action[i].sa.sa_handler = SIG_IGN;
474
475 flush_signals(t);
476}
477
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 * Flush all handlers for a task.
480 */
481
482void
483flush_signal_handlers(struct task_struct *t, int force_default)
484{
485 int i;
486 struct k_sigaction *ka = &t->sighand->action[0];
487 for (i = _NSIG ; i != 0 ; i--) {
488 if (force_default || ka->sa.sa_handler != SIG_IGN)
489 ka->sa.sa_handler = SIG_DFL;
490 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700491#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700492 ka->sa.sa_restorer = NULL;
493#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 sigemptyset(&ka->sa.sa_mask);
495 ka++;
496 }
497}
498
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200499int unhandled_signal(struct task_struct *tsk, int sig)
500{
Roland McGrath445a91d2008-07-25 19:45:52 -0700501 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
Serge E. Hallynb460cbc2007-10-18 23:39:52 -0700502 if (is_global_init(tsk))
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200503 return 1;
Roland McGrath445a91d2008-07-25 19:45:52 -0700504 if (handler != SIG_IGN && handler != SIG_DFL)
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200505 return 0;
Tejun Heoa288eec2011-06-17 16:50:37 +0200506 /* if ptraced, let the tracer determine */
507 return !tsk->ptrace;
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200508}
509
/*
 * Remove one queued instance of @sig from @list and copy its siginfo into
 * @info.  *@resched_timer is set when the dequeued entry belongs to a
 * preallocated posix-timer queue and the timer needs rescheduling.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			/*
			 * A second queued instance exists: jump past the
			 * sigdelset() below so the pending bit stays set.
			 */
			if (first)
				goto still_pending;
			first = q;
		}
	}

	/* Only one (or zero) instance was queued; the bit can be cleared. */
	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
553
/*
 * Pick the next deliverable signal from @pending that is not blocked by
 * @mask, fill in @info/@resched_timer for it, and return its number
 * (0 if none).
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
563
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
639
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);	/* already running: force a signal check */
}
664
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* Fast path: nothing in @s intersects @mask. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	/* Clear the masked bits, then drop the matching queued entries. */
	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689
/*
 * True when @info is one of the kernel's sentinel values (SEND_SIG_NOINFO,
 * SEND_SIG_PRIV, SEND_SIG_FORCED) rather than a real siginfo pointer.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
694
695static inline bool si_fromuser(const struct siginfo *info)
696{
697 return info == SEND_SIG_NOINFO ||
698 (!is_si_special(info) && SI_FROMUSER(info));
699}
700
/*
 * Dequeue the first pending synchronous (fault-class) signal for current,
 * copying its siginfo into @info.  Returns the signal number, or 0 when no
 * unblocked synchronous signal is queued.  Caller holds the siglock.
 */
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 * If so, the pending bit must stay set.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
742
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700744 * called with RCU read lock from check_kill_permission()
745 */
746static int kill_ok_by_cred(struct task_struct *t)
747{
748 const struct cred *cred = current_cred();
749 const struct cred *tcred = __task_cred(t);
750
Eric W. Biederman5af66202012-03-03 20:21:47 -0800751 if (uid_eq(cred->euid, tcred->suid) ||
752 uid_eq(cred->euid, tcred->uid) ||
753 uid_eq(cred->uid, tcred->suid) ||
754 uid_eq(cred->uid, tcred->uid))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700755 return 1;
756
Eric W. Biedermanc4a4d602011-11-16 23:15:31 -0800757 if (ns_capable(tcred->user_ns, CAP_KILL))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700758 return 1;
759
760 return 0;
761}
762
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 when delivery is allowed, -EINVAL for a bad signal
 * number, -EPERM (or an audit/security error) when refused.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-internal signals bypass the credential checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through - SIGCONT to another session is denied */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
801
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* wake a stopped tracee only if its ptracer is in PTRACE_LISTEN */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
827
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* Group is coredumping (no GROUP_EXIT yet): only SIGKILL gets in. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* SEIZED tracees get a sticky trap instead of a plain wakeup */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
902
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700903/*
904 * Test if P wants to take SIG. After we've checked all threads with this,
905 * it's equivalent to finding no threads not blocking SIG. Any threads not
906 * blocking SIG were ruled out because they are not running and already
907 * have pending signals. Such threads will dequeue from the shared queue
908 * as soon as they're available, so putting the signal on the shared queue
909 * will be equivalent to sending it to one such thread.
910 */
911static inline int wants_signal(int sig, struct task_struct *p)
912{
913 if (sigismember(&p->blocked, sig))
914 return 0;
915 if (p->flags & PF_EXITING)
916 return 0;
917 if (sig == SIGKILL)
918 return 1;
919 if (task_is_stopped_or_traced(p))
920 return 0;
921 return task_curr(p) || !signal_pending(p);
922}
923
/*
 * Finish generation of @sig (already queued on @p's pending set chosen
 * by @group): pick a thread to take it and wake that thread.  If the
 * signal is group-fatal and not handled/coredumping, short-circuit the
 * whole group into exit right here.
 *
 * Runs under the sighand lock (see callers in __send_signal()).
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
999
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001000static inline int legacy_queue(struct sigpending *signals, int sig)
1001{
1002 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1003}
1004
#ifdef CONFIG_USER_NS
/*
 * Translate info->si_uid from the sender's user namespace into the
 * namespace of the receiving task @t, so the uid is meaningful there.
 * Kernel-generated siginfo carries no user uid and is left alone.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	/* Same namespace: the uid is already in the right terms. */
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* !CONFIG_USER_NS: a single namespace, nothing to translate. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1025
/*
 * Core signal generation: queue @sig with @info on @t.
 *
 * @group selects the process-wide shared_pending queue vs. the
 * per-thread queue.  @from_ancestor_ns is set when the sender lives in
 * an ancestor pid namespace, in which case si_pid is meaningless to the
 * receiver and is cleared.
 *
 * Returns 0 on success (including when the signal is dropped as
 * ignored or already pending), -EAGAIN when an rt signal cannot be
 * queued.  Must be called with @t->sighand->siglock held.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	/* Kernel-forced signals may not be ignored; hence the "force" term. */
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			/* Sent by userspace without siginfo: synthesize SI_USER. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			/* Sent by the kernel: synthesize SI_KERNEL. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1129
/*
 * Wrapper for __send_signal() that computes from_ancestor_ns: true
 * when a user-originated signal comes from a pid namespace in which
 * the current task has no pid (sender is in an ancestor namespace).
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1142
/*
 * Log diagnostics for a fatal signal: the signal number, register
 * state, and (on i386, not UML) a hexdump of up to 16 code bytes at
 * the faulting instruction pointer.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* the mapping may be gone; stop at the first fault */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* show_regs() wants a stable CPU context */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1166
1167static int __init setup_print_fatal_signals(char *str)
1168{
1169 get_option (&str, &print_fatal_signals);
1170
1171 return 1;
1172}
1173
1174__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
/* Queue @sig on the shared (thread-group-wide) pending set of @p. */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
1181
/* Queue @sig on the per-thread pending set of @t only. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
1187
/*
 * Send @sig to @p with the sighand lock properly taken; @group picks
 * shared vs. per-thread delivery.  Returns -ESRCH when the target's
 * sighand is already gone (task is dying/dead).
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
1201
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/* a forced default-action signal must be able to kill even an
	 * otherwise "unkillable" task */
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1238
/*
 * Nuke all other threads in the group.
 *
 * Queues SIGKILL on every other live thread of @p's group and wakes
 * it.  Returns the number of other threads iterated, including ones
 * already in exit_state.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	/* any in-progress group stop is now moot */
	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1262
/*
 * Safely take @tsk->sighand->siglock, re-validating ->sighand under
 * the lock because it can change or go away concurrently.  Returns
 * the locked sighand (irqs saved into *@flags), or NULL if the task
 * no longer has one.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1304
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	/* sig == 0 is a pure permission probe: check but send nothing */
	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
1321
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 *
 * Returns 0 if delivery succeeded to at least one member, otherwise
 * the last error seen (-ESRCH when the group was empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1341
/*
 * Send @sig to the process identified by struct pid @pid, retrying if
 * the group-leader task is unhashed under us during delivery.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1363
/* Resolve numeric @pid in the current pid namespace and signal it. */
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1372
Serge Hallynd178bc32011-09-26 10:45:18 -05001373static int kill_as_cred_perm(const struct cred *cred,
1374 struct task_struct *target)
1375{
1376 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001377 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1378 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001379 return 0;
1380 return 1;
1381}
1382
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
/*
 * Permission is judged against the caller-supplied @cred and LSM
 * @secid instead.  sig == 0 probes permission without delivering.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* credential check only applies to user-originated signals */
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * pid > 0   signal that single process
 * pid == 0  signal every member of the caller's process group
 * pid < -1  signal every member of process group -pid
 * pid == -1 signal everything the caller may signal, except
 *           vpid 1 (init) and the caller's own thread group
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				/* -EPERM on some targets does not spoil the rest */
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1466
1467/*
1468 * These are for backward compatibility with the rest of the kernel source.
1469 */
1470
Randy Dunlap5aba0852011-04-04 14:59:31 -07001471int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 /*
1474 * Make sure legacy kernel users don't send in bad values
1475 * (normal paths check this in check_kill_permission).
1476 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001477 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return -EINVAL;
1479
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001480 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481}
1482
/*
 * Map the "priv" flag of the legacy helpers below onto the special
 * siginfo cookies: kernel-internal sender vs. plain no-info signal.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1485
/*
 * Legacy helper: send @sig to @p; @priv selects the kernel-internal
 * (SEND_SIG_PRIV) rather than the no-info (SEND_SIG_NOINFO) cookie.
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *info = __si_special(priv);

	return send_sig_info(sig, info, p);
}
1491
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492void
1493force_sig(int sig, struct task_struct *p)
1494{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001495 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496}
1497
1498/*
1499 * When things go south during signal handling, we
1500 * will force a SIGSEGV. And if the signal that caused
1501 * the problem was already a SIGSEGV, we'll want to
1502 * make sure we don't even try to deliver the signal..
1503 */
1504int
1505force_sigsegv(int sig, struct task_struct *p)
1506{
1507 if (sig == SIGSEGV) {
1508 unsigned long flags;
1509 spin_lock_irqsave(&p->sighand->siglock, flags);
1510 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1511 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1512 }
1513 force_sig(SIGSEGV, p);
1514 return 0;
1515}
1516
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001517int kill_pgrp(struct pid *pid, int sig, int priv)
1518{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001519 int ret;
1520
1521 read_lock(&tasklist_lock);
1522 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1523 read_unlock(&tasklist_lock);
1524
1525 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001526}
1527EXPORT_SYMBOL(kill_pgrp);
1528
/*
 * Send @sig to the single task identified by struct pid @pid.
 */
int kill_pid(struct pid *pid, int sig, int priv)
{
	struct siginfo *info = __si_special(priv);

	return kill_pid_info(sig, info, pid);
}
EXPORT_SYMBOL(kill_pid);
1534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535/*
1536 * These functions support sending signals using preallocated sigqueue
1537 * structures. This is needed "because realtime applications cannot
1538 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001539 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 * we allocate the sigqueue structure from the timer_create. If this
1541 * allocation fails we are able to report the failure to the application
1542 * with an EAGAIN error.
1543 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544struct sigqueue *sigqueue_alloc(void)
1545{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001546 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001548 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001550
1551 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552}
1553
/*
 * Release a preallocated sigqueue.  If the entry is still on a pending
 * list we only clear SIGQUEUE_PREALLOC here; the actual free then
 * happens when it is dequeued or flushed, like a regular sigqueue.
 * Otherwise it is freed immediately.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	/* q is NULL here when ownership stayed with the pending list */
	if (q)
		__sigqueue_free(q);
}
1578
/*
 * Queue the preallocated sigqueue @q on @t: on the per-thread pending
 * list if !@group, else on the thread group's shared pending list.
 *
 * Returns 0 on success, 1 if the signal is ignored, and -1 if @t's
 * sighand could not be locked (lock_task_sighand() failed — the task
 * is presumably exiting; verify against callers).
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	/* fresh delivery: any previously reported overrun is stale */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	/* trace the outcome before dropping the sighand lock */
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1622
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	/* only a ptraced task, or an empty group leader, may report death */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	/* pid/uid as seen from the parent's namespaces */
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* own CPU time plus the times accumulated in ->signal */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/*
	 * exit_code layout: bit 0x80 = core dumped, low 7 bits = killing
	 * signal; if neither, bits 8-15 carry the normal exit status.
	 */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1719
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	/* pick the recipient: the ptracer, or the group leader's real parent */
	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	/* si_status carries the stop/continue/trap detail for @why */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* suppress SIGCHLD if the parent ignores it or set SA_NOCLDSTOP */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1789
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001790static inline int may_ptrace_stop(void)
1791{
Tejun Heod21142e2011-06-17 16:50:34 +02001792 if (!likely(current->ptrace))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001793 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001794 /*
1795 * Are we in the middle of do_coredump?
1796 * If so and our tracer is also part of the coredump stopping
1797 * is a deadlock situation, and pointless because our tracer
1798 * is dead so don't allow us to stop.
1799 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001800 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001801 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01001802 *
1803 * This is almost outdated, a task with the pending SIGKILL can't
1804 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1805 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001806 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001807 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001808 unlikely(current->mm == current->parent->mm))
1809 return 0;
1810
1811 return 1;
1812}
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001815 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001816 * Called with the siglock held.
1817 */
1818static int sigkill_pending(struct task_struct *tsk)
1819{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001820 return sigismember(&tsk->pending.signal, SIGKILL) ||
1821 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001822}
1823
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		/* sleep in TASK_TRACED until the tracer resumes us */
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
1956
Tejun Heo3544d722011-06-14 11:20:15 +02001957static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958{
1959 siginfo_t info;
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 memset(&info, 0, sizeof info);
Tejun Heo3544d722011-06-14 11:20:15 +02001962 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001964 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001965 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
1967 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02001968 ptrace_stop(exit_code, why, 1, &info);
1969}
1970
1971void ptrace_notify(int exit_code)
1972{
1973 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
Oleg Nesterovf784e8a2012-08-26 21:12:17 +02001974 if (unlikely(current->task_works))
1975 task_work_run();
Tejun Heo3544d722011-06-14 11:20:15 +02001976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 spin_lock_irq(&current->sighand->siglock);
Tejun Heo3544d722011-06-14 11:20:15 +02001978 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 spin_unlock_irq(&current->sighand->siglock);
1980}
1981
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	/* no stop in progress for us yet: initiate (or join) one */
	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		/* bail if the stop was already cancelled or we're exiting */
		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		/* count ourselves in if the pending bit could be set */
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002105
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	/* Stop signal encoded in the low bits of jobctl, 0 if none. */
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop is in progress or completed: this trap
		 * was requested explicitly, so report it as SIGTRAP.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		/* Legacy ptrace: plain stop trap, no siginfo. */
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2139
/*
 * Report the dequeued signal @signr to the tracer and let it cancel or
 * replace the signal.  Returns the (possibly changed) signal number to
 * deliver, or 0 if the tracer cancelled it or it became blocked and was
 * requeued.
 *
 * Called with current->sighand->siglock held; ptrace_stop() sleeps and
 * drops/re-acquires the lock while the tracee is in TASK_TRACED.
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		/* Make the replacement look like a kill from the tracer. */
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2187
/**
 * get_signal - dequeue and act on the next pending signal
 * @ksig: filled with the signal, siginfo and sigaction to deliver
 *
 * Main signal-delivery loop.  Handles pending job-control state
 * (group-stop participation, jobctl traps, SIGNAL_CLD_* notifications),
 * gives a ptracer the chance to intercept each dequeued signal, and
 * performs default dispositions internally: ignored signals are
 * dropped, stop signals enter do_signal_stop(), and fatal signals end
 * in do_coredump()/do_group_exit(), which does not return.
 *
 * Returns nonzero with *@ksig populated when the caller must run a
 * user-space handler; returns 0 when there is nothing left to deliver.
 * Acquires and releases current->sighand->siglock internally and may
 * sleep (freezable_schedule() via do_signal_stop()/ptrace_stop()).
 */
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	/* Flush any queued task_work before taking the siglock. */
	if (unlikely(current->task_works))
		task_work_run();

	/* Let uprobes temporarily suppress delivery if it asks to. */
	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		/* Deliver SIGKILL straight to the fatal path below. */
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		/* Join a pending group stop before dequeueing anything. */
		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		/* Take a scheduled ptrace STOP/INTERRUPT trap. */
		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* Let the tracer intercept everything except SIGKILL. */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			/* SA_ONESHOT: reset the disposition before delivery. */
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2392
Matt Fleming5e6292c2012-01-10 15:11:17 -08002393/**
Al Viroefee9842012-04-28 02:04:15 -04002394 * signal_delivered -
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002395 * @ksig: kernel signal struct
Al Viroefee9842012-04-28 02:04:15 -04002396 * @stepping: nonzero if debugger single-step or block-step in use
Matt Fleming5e6292c2012-01-10 15:11:17 -08002397 *
Masanari Iidae2278672014-02-18 22:54:36 +09002398 * This function should be called when a signal has successfully been
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002399 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
Al Viroefee9842012-04-28 02:04:15 -04002400 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002401 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
Matt Fleming5e6292c2012-01-10 15:11:17 -08002402 */
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002403static void signal_delivered(struct ksignal *ksig, int stepping)
Matt Fleming5e6292c2012-01-10 15:11:17 -08002404{
2405 sigset_t blocked;
2406
Al Viroa610d6e2012-05-21 23:42:15 -04002407 /* A signal was successfully delivered, and the
2408 saved sigmask was stored on the signal frame,
2409 and will be restored by sigreturn. So we can
2410 simply clear the restore sigmask flag. */
2411 clear_restore_sigmask();
2412
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002413 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2414 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2415 sigaddset(&blocked, ksig->sig);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002416 set_current_blocked(&blocked);
Richard Weinbergerdf5601f2013-10-07 15:37:19 +02002417 tracehook_signal_handler(stepping);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002418}
2419
Al Viro2ce5da12012-11-07 15:11:25 -05002420void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2421{
2422 if (failed)
2423 force_sigsegv(ksig->sig, current);
2424 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002425 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002426}
2427
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	/* Shared-pending signals that fall within @which; done if none. */
	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	/* Walk the other threads in the group, waking candidates. */
	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		/* Skip threads that have all of @retarget blocked. */
		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* Stop early once every signal has found a taker. */
		if (sigisemptyset(&retarget))
			break;
	}
}
2459
/*
 * Signal-side preparation for task exit: sets PF_EXITING on @tsk so it
 * stops being a target for group-wide signals, retargets any shared
 * pending signals it had unblocked to other live threads, and delivers
 * the group-stop notification if this thread was the last participant
 * needed to complete a pending group stop.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	/* Fast path: sole thread or whole-group exit, nothing to retarget. */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* Hand shared pending signals we could have taken to siblings. */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2509
/* Core signal primitives exported for use by loadable modules. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517
2518/*
2519 * System call entry points.
2520 */
2521
Randy Dunlap41c57892011-04-04 15:00:26 -07002522/**
2523 * sys_restart_syscall - restart a system call
2524 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002525SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002527 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return restart->fn(restart);
2529}
2530
/*
 * Restart callback for system calls that must not be restarted:
 * always fails with -EINTR.  @param is unused.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2535
/*
 * Install @newset as @tsk's blocked mask.  Shared pending signals that
 * are about to become blocked here must first be retargeted to other
 * threads which can still take them; see retarget_shared_pending().
 *
 * Called with @tsk->sighand->siglock held (see __set_current_blocked()).
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2547
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 *
 * SIGKILL and SIGSTOP are stripped from @newset before it is applied;
 * use __set_current_blocked() to install a mask verbatim.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2560
/*
 * Like set_current_blocked() but installs @newset verbatim (no
 * SIGKILL/SIGSTOP filtering), taking siglock only when the mask
 * actually changes.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 *
 * @how is SIG_BLOCK, SIG_UNBLOCK or SIG_SETMASK; @set is the mask to
 * apply and @oldset, if non-NULL, receives the previous blocked mask.
 * Returns 0 on success or -EINVAL for an unknown @how.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		/* Add @set to the currently blocked signals. */
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		/* Remove @set from the currently blocked signals. */
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		/* Replace the blocked mask wholesale. */
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
2611
Randy Dunlap41c57892011-04-04 15:00:26 -07002612/**
2613 * sys_rt_sigprocmask - change the list of currently blocked signals
2614 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07002615 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002616 * @oset: previous value of signal mask if non-null
2617 * @sigsetsize: size of sigset_t type
2618 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002619SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002620 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002623 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
2625 /* XXX: Don't preclude handling different sized sigset_t's. */
2626 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002627 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002629 old_set = current->blocked;
2630
2631 if (nset) {
2632 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2633 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2635
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002636 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002638 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002640
2641 if (oset) {
2642 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2643 return -EFAULT;
2644 }
2645
2646 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647}
2648
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigprocmask.  On big-endian kernels the
 * 32-bit sigset words must be converted explicitly; on little-endian
 * the native syscall is reused with the pointers cast through.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* SIGKILL and SIGSTOP may never be blocked from userspace. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
Al Viro322a56c2012-12-25 13:32:58 -05002687
/*
 * Gather the union of per-thread and shared pending signals into @set
 * (under siglock), then restrict it to the currently blocked signals.
 * @sigsetsize larger than sizeof(sigset_t) is rejected with -EINVAL.
 */
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702
Randy Dunlap41c57892011-04-04 15:00:26 -07002703/**
2704 * sys_rt_sigpending - examine a pending signal that has been raised
2705 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002706 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002707 * @sigsetsize: size of sigset_t type or larger
2708 */
Al Virofe9c1db2012-12-25 14:31:38 -05002709SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710{
Al Virofe9c1db2012-12-25 14:31:38 -05002711 sigset_t set;
2712 int err = do_sigpending(&set, sigsetsize);
2713 if (!err && copy_to_user(uset, &set, sigsetsize))
2714 err = -EFAULT;
2715 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716}
2717
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigpending.  On big-endian kernels the
 * result is converted to the 32-bit sigset word layout; on
 * little-endian the native syscall is reused via a pointer cast.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2740
/*
 * Copy a siginfo_t to userspace, writing only the fields that are
 * meaningful for the record's si_code class (to avoid leaking kernel
 * stack padding).  A negative si_code marks a userspace-originated
 * siginfo whose union layout is unknown, so it is copied verbatim.
 * Returns 0 on success, -EFAULT on a faulting user access.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	/* Dispatch on the si_code class bits to pick the union member. */
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		/* MPX bound-violation faults carry the violated bounds. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		/* Protection-key faults carry the offending pkey. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2828
2829#endif
2830
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 *
 * Returns the dequeued signal number on success, -EINVAL for an invalid
 * timespec, -EINTR if the sleep was interrupted before a wanted signal
 * arrived, or -EAGAIN if the (possibly zero) timeout expired.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	/* KTIME_MAX default means "wait forever" when no @ts is given. */
	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	/* A zero timeout (timeout.tv64 == 0) turns this into a pure poll. */
	if (!sig && timeout.tv64) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/* Re-take the lock, restore the saved mask, and retry once. */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	/* ret != 0 means the schedule was interrupted rather than timed out. */
	return ret ? -EINTR : -EAGAIN;
}
2886
2887/**
Randy Dunlap41c57892011-04-04 15:00:26 -07002888 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2889 * in @uthese
2890 * @uthese: queued signals to wait for
2891 * @uinfo: if non-null, the signal's siginfo is returned here
2892 * @uts: upper bound on process time suspension
2893 * @sigsetsize: size of sigset_t type
2894 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002895SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2896 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2897 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 sigset_t these;
2900 struct timespec ts;
2901 siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02002902 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
2904 /* XXX: Don't preclude handling different sized sigset_t's. */
2905 if (sigsetsize != sizeof(sigset_t))
2906 return -EINVAL;
2907
2908 if (copy_from_user(&these, uthese, sizeof(these)))
2909 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07002910
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 if (uts) {
2912 if (copy_from_user(&ts, uts, sizeof(ts)))
2913 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 }
2915
Oleg Nesterov943df142011-04-27 21:44:14 +02002916 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917
Oleg Nesterov943df142011-04-27 21:44:14 +02002918 if (ret > 0 && uinfo) {
2919 if (copy_siginfo_to_user(uinfo, &info))
2920 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 }
2922
2923 return ret;
2924}
2925
Christian Brauner6dc8e7c2018-11-19 00:51:56 +01002926static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
2927{
2928 info->si_signo = sig;
2929 info->si_errno = 0;
2930 info->si_code = SI_USER;
2931 info->si_pid = task_tgid_vnr(current);
2932 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
2933}
2934
Randy Dunlap41c57892011-04-04 15:00:26 -07002935/**
2936 * sys_kill - send a signal to a process
2937 * @pid: the PID of the process
2938 * @sig: signal to be sent
2939 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002940SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941{
2942 struct siginfo info;
2943
Christian Brauner6dc8e7c2018-11-19 00:51:56 +01002944 prepare_kill_siginfo(sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946 return kill_something_info(sig, &info, pid);
2947}
2948
Christian Brauner6dc8e7c2018-11-19 00:51:56 +01002949#ifdef CONFIG_PROC_FS
2950/*
2951 * Verify that the signaler and signalee either are in the same pid namespace
2952 * or that the signaler's pid namespace is an ancestor of the signalee's pid
2953 * namespace.
2954 */
2955static bool access_pidfd_pidns(struct pid *pid)
2956{
2957 struct pid_namespace *active = task_active_pid_ns(current);
2958 struct pid_namespace *p = ns_of_pid(pid);
2959
2960 for (;;) {
2961 if (!p)
2962 return false;
2963 if (p == active)
2964 break;
2965 p = p->parent;
2966 }
2967
2968 return true;
2969}
2970
2971static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
2972{
2973#ifdef CONFIG_COMPAT
2974 /*
2975 * Avoid hooking up compat syscalls and instead handle necessary
2976 * conversions here. Note, this is a stop-gap measure and should not be
2977 * considered a generic solution.
2978 */
2979 if (in_compat_syscall())
2980 return copy_siginfo_from_user32(
2981 kinfo, (struct compat_siginfo __user *)info);
2982#endif
2983 return copy_from_user(kinfo, info, sizeof(siginfo_t));
2984}
2985
/**
 * sys_pidfd_send_signal - send a signal to a process through a task file
 *                          descriptor
 * @pidfd:  the file descriptor of the process
 * @sig:    signal to be sent
 * @info:   the signal info
 * @flags:  future flags to be passed
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>. It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = tgid_pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	/* Caller's pid namespace must be the target's or an ancestor. */
	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		/* The siginfo must agree with the signal being sent. */
		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		/* No user-supplied info: synthesize a plain SI_USER record. */
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
3056#endif /* CONFIG_PROC_FS */
3057
/*
 * Deliver @sig with @info to the single task @pid, optionally verifying
 * that the task belongs to thread group @tgid (tgid <= 0 skips the
 * check).  Returns 0 on success, -ESRCH if no matching task exists, or
 * the permission-check error.  Used by tkill/tgkill/rt_tgsigqueueinfo.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU keeps the looked-up task_struct alive without a refcount. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
3087
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003088static int do_tkill(pid_t tgid, pid_t pid, int sig)
3089{
Emese Revfyb9e146d2013-04-17 15:58:36 -07003090 struct siginfo info = {};
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003091
3092 info.si_signo = sig;
3093 info.si_errno = 0;
3094 info.si_code = SI_TKILL;
3095 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08003096 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003097
3098 return do_send_specific(tgid, pid, sig, &info);
3099}
3100
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101/**
3102 * sys_tgkill - send signal to one specific thread
3103 * @tgid: the thread group ID of the thread
3104 * @pid: the PID of the thread
3105 * @sig: signal to be sent
3106 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003107 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 * exists but it's not belonging to the target process anymore. This
3109 * method solves the problem of threads exiting and PIDs getting reused.
3110 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003111SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 /* This is only valid for single tasks */
3114 if (pid <= 0 || tgid <= 0)
3115 return -EINVAL;
3116
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003117 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118}
3119
Randy Dunlap41c57892011-04-04 15:00:26 -07003120/**
3121 * sys_tkill - send signal to one specific task
3122 * @pid: the PID of the task
3123 * @sig: signal to be sent
3124 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3126 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003127SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 /* This is only valid for single tasks */
3130 if (pid <= 0)
3131 return -EINVAL;
3132
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003133 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134}
3135
Al Viro75907d42012-12-25 15:19:12 -05003136static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3137{
3138 /* Not even root can pretend to send signals from the kernel.
3139 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3140 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003141 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003142 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003143 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003144
Al Viro75907d42012-12-25 15:19:12 -05003145 info->si_signo = sig;
3146
3147 /* POSIX.1b doesn't mention process groups. */
3148 return kill_proc_info(sig, info, pid);
3149}
3150
Randy Dunlap41c57892011-04-04 15:00:26 -07003151/**
3152 * sys_rt_sigqueueinfo - send signal information to a signal
3153 * @pid: the PID of the thread
3154 * @sig: signal to be sent
3155 * @uinfo: signal info to be sent
3156 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003157SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3158 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159{
3160 siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3162 return -EFAULT;
Al Viro75907d42012-12-25 15:19:12 -05003163 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164}
3165
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigqueueinfo: converts the 32-bit siginfo
 * layout to the native one, then delegates to do_rt_sigqueueinfo().
 * The zero-initializer avoids leaking stack bytes through union padding
 * that the conversion does not fill.
 */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
Al Viro75907d42012-12-25 15:19:12 -05003179
/*
 * Backend for rt_tgsigqueueinfo(): like do_rt_sigqueueinfo() but the
 * target must be a single task (@pid) verified to be in thread group
 * @tgid.  si_signo is forced to @sig before delivery.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3197
3198SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3199 siginfo_t __user *, uinfo)
3200{
3201 siginfo_t info;
3202
3203 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3204 return -EFAULT;
3205
3206 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3207}
3208
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_tgsigqueueinfo: converts the 32-bit siginfo
 * layout to the native one, then delegates to do_rt_tgsigqueueinfo().
 * The zero-initializer avoids leaking stack bytes through union padding
 * that the conversion does not fill.
 */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3223
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 *
 * Installs @action as the handler for @sig on the current task.  When
 * the action is SIG_IGN, any already-queued instances of @sig (both
 * shared and private) are flushed under the siglock, mirroring the
 * POSIX "setting SIG_IGN discards pending signals" rule.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003244
/*
 * Weak no-op default; architectures that need to adjust sigaction
 * semantics for compat tasks override this.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3249
/*
 * Core of sigaction(2): atomically (under siglock) read the old action
 * into @oact and/or install @act for @sig.  Returns 0 or -EINVAL for an
 * invalid signal or an attempt to change SIGKILL/SIGSTOP.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* sig_kernel_only() rejects changing SIGKILL/SIGSTOP handlers. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Give the architecture a chance to fix up compat-task actions. */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL/SIGSTOP can never be masked. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush the shared queue and every thread's queue. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3294
/*
 * Core of sigaltstack(2): optionally report the current alternate
 * signal stack to @uoss and/or install a new one from @uss.  @sp is the
 * caller's current stack pointer (used to refuse changes while running
 * on the alternate stack); @min_ss_size is the minimum acceptable stack
 * size (MINSIGSTKSZ or the compat equivalent).
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
		size_t min_ss_size)
{
	stack_t oss;
	int error;

	/* Snapshot the old settings before anything can change them. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Cannot modify the altstack while currently executing on it. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		/* Separate the mode (SS_DISABLE/SS_ONSTACK) from flag bits. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			/* NB: returns directly, bypassing the uoss copy-out. */
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
/*
 * sigaltstack(2): thin wrapper passing the caller's stack pointer and
 * the native minimum stack size to do_sigaltstack().
 */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
			      MINSIGSTKSZ);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363
Al Viro5c495742012-11-18 15:29:16 -05003364int restore_altstack(const stack_t __user *uss)
3365{
Will Deacon1e7066a2018-09-05 15:34:42 +01003366 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
3367 MINSIGSTKSZ);
Al Viro5c495742012-11-18 15:29:16 -05003368 /* squash all but EFAULT for now */
3369 return err == -EFAULT ? err : 0;
3370}
3371
/*
 * Save the current task's alternate-stack settings to user memory @uss
 * (used when building a signal frame at @sp).  If SS_AUTODISARM is set,
 * the kernel-side altstack is reset after a successful save so nested
 * signals do not reuse it.  Returns 0 or the __put_user() error.
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3384
Al Viro90268432012-12-14 14:47:53 -05003385#ifdef CONFIG_COMPAT
/*
 * Compat sigaltstack(2): widens the 32-bit stack_t to the native one,
 * calls do_sigaltstack() under KERNEL_DS (the converted structs live in
 * kernel memory, so the __user checks must be bypassed), then narrows
 * the result back for userspace.
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		/* Narrow the native result back into the compat layout. */
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3420
3421int compat_restore_altstack(const compat_stack_t __user *uss)
3422{
3423 int err = compat_sys_sigaltstack(uss, NULL);
3424 /* squash all but -EFAULT for now */
3425 return err == -EFAULT ? err : 0;
3426}
Al Viroc40702c2012-11-20 14:24:26 -05003427
/*
 * Compat counterpart of __save_altstack(): store the current task's
 * altstack settings in compat layout at @uss, then honour
 * SS_AUTODISARM by resetting the kernel-side altstack on success.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
Al Viro90268432012-12-14 14:47:53 -05003442#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443
3444#ifdef __ARCH_WANT_SYS_SIGPENDING
3445
/**
 * sys_sigpending - examine pending signals
 * @set: where mask of pending signal is returned
 *
 * Legacy entry point: reuses rt_sigpending with the old (smaller)
 * sigset size, so only the first word of the pending set is reported.
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}
3454
3455#endif
3456
3457#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 *
 * Legacy single-word interface: only the first word of the blocked set
 * (current->blocked.sig[0]) is read or replaced.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	/* Snapshot the old first word before any modification. */
	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			/* Only word 0 is replaced; higher words keep their value. */
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
3506#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3507
Al Viroeaca6ea2012-11-25 23:12:10 -05003508#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003509/**
3510 * sys_rt_sigaction - alter an action taken by a process
3511 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003512 * @act: new sigaction
3513 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003514 * @sigsetsize: size of sigset_t type
3515 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003516SYSCALL_DEFINE4(rt_sigaction, int, sig,
3517 const struct sigaction __user *, act,
3518 struct sigaction __user *, oact,
3519 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520{
3521 struct k_sigaction new_sa, old_sa;
3522 int ret = -EINVAL;
3523
3524 /* XXX: Don't preclude handling different sized sigset_t's. */
3525 if (sigsetsize != sizeof(sigset_t))
3526 goto out;
3527
3528 if (act) {
3529 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3530 return -EFAULT;
3531 }
3532
3533 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3534
3535 if (!ret && oact) {
3536 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3537 return -EFAULT;
3538 }
3539out:
3540 return ret;
3541}
Al Viro08d32fe2012-12-25 18:38:15 -05003542#ifdef CONFIG_COMPAT
/*
 * Compat counterpart of sys_rt_sigaction(): translates 32-bit user
 * pointers and the compat sigset layout to and from the native
 * struct k_sigaction around the call to do_sigaction().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/*
		 * Accumulate all get_user()/copy_from_user() failures with
		 * |= and report them as a single -EFAULT below.
		 */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		/* Widen the compat mask into the native sigset layout. */
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Mirror the input translation for the saved action. */
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3588#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003589#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590
Al Viro495dfbf2012-12-25 19:09:45 -05003591#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction() entry point using struct old_sigaction, which
 * carries only a one-word signal mask.  Superseded by rt_sigaction.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/*
		 * One access_ok() check up front, then unchecked
		 * __get_user() per field; any fault means -EFAULT.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Old ABI: only the low word of the mask is meaningful. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3626#endif
3627#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat variant of the legacy sigaction() above: same one-word mask
 * semantics, plus compat_ptr() translation of the handler/restorer
 * pointers supplied by a 32-bit caller.
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok(), then per-field __get_user() fetches. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Convert 32-bit user pointers to native ones. */
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3667#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668
Fabian Frederickf6187762014-06-04 16:11:12 -07003669#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670
3671/*
3672 * For backwards compatibility. Functionality superseded by sigprocmask.
3673 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003674SYSCALL_DEFINE0(sgetmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675{
3676 /* SMP safe */
3677 return current->blocked.sig[0];
3678}
3679
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003680SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003682 int old = current->blocked.sig[0];
3683 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003685 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003686 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687
3688 return old;
3689}
Fabian Frederickf6187762014-06-04 16:11:12 -07003690#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691
3692#ifdef __ARCH_WANT_SYS_SIGNAL
3693/*
3694 * For backwards compatibility. Functionality superseded by sigaction.
3695 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003696SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697{
3698 struct k_sigaction new_sa, old_sa;
3699 int ret;
3700
3701 new_sa.sa.sa_handler = handler;
3702 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003703 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
3705 ret = do_sigaction(sig, &new_sa, &old_sa);
3706
3707 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3708}
3709#endif /* __ARCH_WANT_SYS_SIGNAL */
3710
3711#ifdef __ARCH_WANT_SYS_PAUSE
3712
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003713SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003715 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003716 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003717 schedule();
3718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 return -ERESTARTNOHAND;
3720}
3721
3722#endif
3723
/*
 * Helper shared by all sigsuspend() flavours: install @set as the
 * blocked mask, sleep until a signal is pending, and arrange for the
 * previous mask to be restored on return to userspace.
 */
static int sigsuspend(sigset_t *set)
{
	/* Save the old mask first; the signal-return path restores it. */
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* Tell the return path that saved_sigmask must be reinstated. */
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04003736
Randy Dunlap41c57892011-04-04 15:00:26 -07003737/**
3738 * sys_rt_sigsuspend - replace the signal mask for a value with the
3739 * @unewset value until a signal is received
3740 * @unewset: new signal mask value
3741 * @sigsetsize: size of sigset_t type
3742 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003743SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003744{
3745 sigset_t newset;
3746
3747 /* XXX: Don't preclude handling different sized sigset_t's. */
3748 if (sigsetsize != sizeof(sigset_t))
3749 return -EINVAL;
3750
3751 if (copy_from_user(&newset, unewset, sizeof(newset)))
3752 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003753 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003754}
Al Viroad4b65a2012-12-24 21:43:56 -05003755
3756#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigsuspend(): on big-endian the compat sigset word order
 * differs from the native layout and must be converted explicitly;
 * little-endian can reuse the native syscall directly.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	/* Rearrange the 32-bit words into the native 64-bit layout. */
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3776#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003777
Al Viro0a0e8cd2012-12-25 16:04:12 -05003778#ifdef CONFIG_OLD_SIGSUSPEND
3779SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3780{
3781 sigset_t blocked;
3782 siginitset(&blocked, mask);
3783 return sigsuspend(&blocked);
3784}
3785#endif
3786#ifdef CONFIG_OLD_SIGSUSPEND3
3787SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3788{
3789 sigset_t blocked;
3790 siginitset(&blocked, mask);
3791 return sigsuspend(&blocked);
3792}
3793#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
/*
 * Weak default for arch_vma_name(): architectures that give special
 * mappings a name (vdso etc.) override this; everyone else reports
 * no special name for any VMA.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3799
/* Boot-time setup for the signal subsystem. */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	/* Slab cache for struct sigqueue; SLAB_PANIC: failure is fatal. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003808
3809#ifdef CONFIG_KGDB_KDB
3810#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/*
	 * Only probe the siglock; if it is held elsewhere we cannot
	 * safely enter the signal code from debugger context.
	 */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	/*
	 * Remember the last target: killing a non-running task requires
	 * the command to be reissued as an explicit confirmation.
	 */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	/* send_sig_info() returns nonzero on failure to deliver. */
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3847#endif /* CONFIG_KGDB_KDB */