blob: 30914b3c76b21d3e7dd04dc7f5889463d4a99da2 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070020#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/security.h>
22#include <linux/syscalls.h>
23#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070024#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070025#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090026#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070027#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080028#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080029#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080030#include <linux/pid_namespace.h>
31#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080032#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053033#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050034#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000035#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070036#include <linux/compiler.h>
37
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050038#define CREATE_TRACE_POINTS
39#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <asm/param.h>
42#include <asm/uaccess.h>
43#include <asm/unistd.h>
44#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010045#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040046#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
48/*
49 * SLAB caches for signal bits.
50 */
51
Christoph Lametere18b8902006-12-06 20:33:20 -080052static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090054int print_fatal_signals __read_mostly;
55
/* Return the userspace handler currently installed for @sig on task @t. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070060
/*
 * Return non-zero if @handler means @sig is ignored: either SIG_IGN was
 * installed explicitly, or the default action for @sig is to ignore it.
 */
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
/*
 * Would @sig be ignored by @t, looking only at the installed handler and
 * the task's special properties (global init, SIGNAL_UNKILLABLE)?
 * @force indicates the signal comes from a context that may override the
 * unkillable protection (e.g. sent from an ancestor namespace).
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	/*
	 * An unkillable task (e.g. a container init) with the default
	 * handler ignores the signal unless it is forced and fatal-only.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}
84
/*
 * Return non-zero if delivering @sig to @t right now would be a no-op.
 * Blocked and ptraced cases are checked here before delegating to
 * sig_task_ignored().
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
105
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/* Hand-unrolled for the common fixed values of _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
135
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Set TIF_SIGPENDING on @t if it has pending job control work or any
 * deliverable private/shared signal.  Returns 1 if the flag was set.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
153
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
163
/*
 * Recalculate TIF_SIGPENDING for current.  Clearing is safe here because
 * we operate on ourselves; a freezing task must keep the flag set.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
170
/* Given the mask, find the first available signal that should be serviced. */

/* Fault-style signals that must be dequeued before anything else. */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Scan the remaining words for the lowest-numbered pending signal. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
222
/*
 * Ratelimited diagnostic for signals dropped because the sender hit
 * RLIMIT_SIGPENDING.  Only emitted when print_fatal_signals is enabled.
 */
static inline void print_dropped_signal(int sig)
{
	/* At most 10 messages per 5*HZ window. */
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
236
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces any previously recorded one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
269
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
290
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
318
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
360
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * The allocation is charged against @t's user; it fails (returns NULL)
 * when the user is over RLIMIT_SIGPENDING unless @override_rlimit is set.
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* Undo the accounting taken above. */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;	/* q owns the uid reference until freed */
	}

	return q;
}
400
/*
 * Free a sigqueue record and drop the pending-signal accounting and uid
 * reference it holds.  Preallocated (posix-timer) entries are not freed
 * here; their lifetime is managed separately.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
409
/* Discard every queued signal on @queue and clear its pending set. */
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
421
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
435
/*
 * Remove SI_TIMER entries from @pending while keeping every other queued
 * signal (and its pending bit) intact.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* Non-timer entry: its signal must stay pending. */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* Keep a bit if any retained entry or an untouched signal needs it. */
	sigorsets(&pending->signal, &signal, &retain);
}
458
/* Flush queued SI_TIMER signals from current's private and shared queues. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
469
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700470void ignore_signals(struct task_struct *t)
471{
472 int i;
473
474 for (i = 0; i < _NSIG; ++i)
475 t->sighand->action[i].sa.sa_handler = SIG_IGN;
476
477 flush_signals(t);
478}
479
/*
 * Flush all handlers for a task.
 * Resets each action to SIG_DFL (leaving SIG_IGN in place unless
 * @force_default), clears flags/restorer and empties the per-signal mask.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
500
/*
 * Return non-zero if nothing will handle @sig for @tsk: global init never
 * handles it, a custom handler means handled, and a ptracer gets the
 * final say otherwise.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
511
/*
 * Remove one queued instance of @sig from @list and copy its siginfo to
 * @info.  The pending bit is cleared only when no second instance of the
 * same signal remains queued.  *@resched_timer is set when the dequeued
 * entry is a preallocated posix-timer signal that must be rearmed.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
555
/*
 * Pick the next deliverable signal from @pending (honouring @mask) and
 * collect its siginfo.  Returns the signal number, or 0 if none.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
565
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
641
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
666
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* Fast path: nothing in @s intersects @mask. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691
/*
 * True when @info is one of the special in-kernel marker values
 * (SEND_SIG_NOINFO/PRIV/FORCED) rather than a real siginfo pointer.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
696
/* True when the signal described by @info originates from userspace. */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
702
/*
 * Dequeue the first queued synchronous (fault-generated) signal for
 * current, if any, copying its siginfo into @info.  Returns the signal
 * number or 0 when no synchronous signal is pending and unblocked.
 */
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	/* Last instance: drop the pending bit before delivering. */
	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
744
/*
 * called with RCU read lock from check_kill_permission()
 *
 * Return non-zero if current's credentials permit signalling @t:
 * matching real/effective uid against the target's real/saved uid, or
 * CAP_KILL in the target credentials' user namespace.
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
764
765/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 * Bad permissions for sending the signal
David Howells694f6902010-08-04 16:59:14 +0100767 * - the caller must hold the RCU read lock
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 */
769static int check_kill_permission(int sig, struct siginfo *info,
770 struct task_struct *t)
771{
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700772 struct pid *sid;
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700773 int error;
774
Jesper Juhl7ed20e12005-05-01 08:59:14 -0700775 if (!valid_signal(sig))
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700776 return -EINVAL;
777
Oleg Nesterov614c5172009-12-15 16:47:22 -0800778 if (!si_fromuser(info))
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700779 return 0;
780
781 error = audit_signal_info(sig, t); /* Let audit system see the signal */
782 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 return error;
Amy Griffise54dc242007-03-29 18:01:04 -0400784
Oleg Nesterov065add32010-05-26 14:42:54 -0700785 if (!same_thread_group(current, t) &&
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700786 !kill_ok_by_cred(t)) {
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700787 switch (sig) {
788 case SIGCONT:
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700789 sid = task_session(t);
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700790 /*
791 * We don't return the error if sid == NULL. The
792 * task was unhashed, the caller must notice this.
793 */
794 if (!sid || sid == task_session(current))
795 break;
796 default:
797 return -EPERM;
798 }
799 }
Steve Grubbc2f0c7c2005-05-06 12:38:39 +0100800
Amy Griffise54dc242007-03-29 18:01:04 -0400801 return security_task_kill(t, info, sig, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802}
803
Tejun Heofb1d9102011-06-14 11:20:17 +0200804/**
805 * ptrace_trap_notify - schedule trap to notify ptracer
806 * @t: tracee wanting to notify tracer
807 *
808 * This function schedules sticky ptrace trap which is cleared on the next
809 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
810 * ptracer.
811 *
Tejun Heo544b2c92011-06-14 11:20:18 +0200812 * If @t is running, STOP trap will be taken. If trapped for STOP and
813 * ptracer is listening for events, tracee is woken up so that it can
814 * re-trap for the new event. If trapped otherwise, STOP trap will be
815 * eventually taken without returning to userland after the existing traps
816 * are finished by PTRACE_CONT.
Tejun Heofb1d9102011-06-14 11:20:17 +0200817 *
818 * CONTEXT:
819 * Must be called with @task->sighand->siglock held.
820 */
821static void ptrace_trap_notify(struct task_struct *t)
822{
823 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
824 assert_spin_locked(&t->sighand->siglock);
825
826 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100827 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
Tejun Heofb1d9102011-06-14 11:20:17 +0200828}
829
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830/*
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700831 * Handle magic process-wide effects of stop/continue signals. Unlike
832 * the signal actions, these happen immediately at signal-generation
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 * time regardless of blocking, ignoring, or handling. This does the
834 * actual continuing for SIGCONT, but not the actual stopping for stop
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700835 * signals. The process stop is done as a signal action for SIG_DFL.
836 *
837 * Returns true if the signal should be actually delivered, otherwise
838 * it should be dropped.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839 */
Oleg Nesterov403bad72013-04-30 15:28:10 -0700840static bool prepare_signal(int sig, struct task_struct *p, bool force)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841{
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700842 struct signal_struct *signal = p->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 struct task_struct *t;
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700844 sigset_t flush;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845
Oleg Nesterov403bad72013-04-30 15:28:10 -0700846 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
Oleg Nesterov5fa534c2015-11-06 16:32:31 -0800847 if (!(signal->flags & SIGNAL_GROUP_EXIT))
Oleg Nesterov403bad72013-04-30 15:28:10 -0700848 return sig == SIGKILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 /*
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700850 * The process is in the middle of dying, nothing to do.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 */
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700852 } else if (sig_kernel_stop(sig)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 /*
854 * This is a stop signal. Remove SIGCONT from all queues.
855 */
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700856 siginitset(&flush, sigmask(SIGCONT));
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700857 flush_sigqueue_mask(&flush, &signal->shared_pending);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700858 for_each_thread(p, t)
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700859 flush_sigqueue_mask(&flush, &t->pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 } else if (sig == SIGCONT) {
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700861 unsigned int why;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 /*
Oleg Nesterov1deac632011-04-01 20:11:50 +0200863 * Remove all stop signals from all queues, wake all threads.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 */
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700865 siginitset(&flush, SIG_KERNEL_STOP_MASK);
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700866 flush_sigqueue_mask(&flush, &signal->shared_pending);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700867 for_each_thread(p, t) {
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700868 flush_sigqueue_mask(&flush, &t->pending);
Tejun Heo3759a0d2011-06-02 11:14:00 +0200869 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
Tejun Heofb1d9102011-06-14 11:20:17 +0200870 if (likely(!(t->ptrace & PT_SEIZED)))
871 wake_up_state(t, __TASK_STOPPED);
872 else
873 ptrace_trap_notify(t);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700874 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700876 /*
877 * Notify the parent with CLD_CONTINUED if we were stopped.
878 *
879 * If we were in the middle of a group stop, we pretend it
880 * was already finished, and then continued. Since SIGCHLD
881 * doesn't queue we report only CLD_STOPPED, as if the next
882 * CLD_CONTINUED was dropped.
883 */
884 why = 0;
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700885 if (signal->flags & SIGNAL_STOP_STOPPED)
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700886 why |= SIGNAL_CLD_CONTINUED;
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700887 else if (signal->group_stop_count)
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700888 why |= SIGNAL_CLD_STOPPED;
889
890 if (why) {
Oleg Nesterov021e1ae2008-04-30 00:53:00 -0700891 /*
Roland McGrathae6d2ed2009-09-23 15:56:53 -0700892 * The first thread which returns from do_signal_stop()
Oleg Nesterov021e1ae2008-04-30 00:53:00 -0700893 * will take ->siglock, notice SIGNAL_CLD_MASK, and
894 * notify its parent. See get_signal_to_deliver().
895 */
Jamie Iles916a05b2017-01-10 16:57:54 -0800896 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700897 signal->group_stop_count = 0;
898 signal->group_exit_code = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 }
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700901
Oleg Nesterovdef8cf72012-03-23 15:02:45 -0700902 return !sig_ignored(p, sig, force);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903}
904
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700905/*
906 * Test if P wants to take SIG. After we've checked all threads with this,
907 * it's equivalent to finding no threads not blocking SIG. Any threads not
908 * blocking SIG were ruled out because they are not running and already
909 * have pending signals. Such threads will dequeue from the shared queue
910 * as soon as they're available, so putting the signal on the shared queue
911 * will be equivalent to sending it to one such thread.
912 */
913static inline int wants_signal(int sig, struct task_struct *p)
914{
915 if (sigismember(&p->blocked, sig))
916 return 0;
917 if (p->flags & PF_EXITING)
918 return 0;
919 if (sig == SIGKILL)
920 return 1;
921 if (task_is_stopped_or_traced(p))
922 return 0;
923 return task_curr(p) || !signal_pending(p);
924}
925
Oleg Nesterov5fcd8352008-04-30 00:52:55 -0700926static void complete_signal(int sig, struct task_struct *p, int group)
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700927{
928 struct signal_struct *signal = p->signal;
929 struct task_struct *t;
930
931 /*
932 * Now find a thread we can wake up to take the signal off the queue.
933 *
934 * If the main thread wants the signal, it gets first crack.
935 * Probably the least surprising to the average bear.
936 */
937 if (wants_signal(sig, p))
938 t = p;
Oleg Nesterov5fcd8352008-04-30 00:52:55 -0700939 else if (!group || thread_group_empty(p))
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700940 /*
941 * There is just one thread and it does not need to be woken.
942 * It will dequeue unblocked signals before it runs again.
943 */
944 return;
945 else {
946 /*
947 * Otherwise try to find a suitable thread.
948 */
949 t = signal->curr_target;
950 while (!wants_signal(sig, t)) {
951 t = next_thread(t);
952 if (t == signal->curr_target)
953 /*
954 * No thread needs to be woken.
955 * Any eligible threads will see
956 * the signal in the queue soon.
957 */
958 return;
959 }
960 signal->curr_target = t;
961 }
962
963 /*
964 * Found a killable thread. If the signal will be fatal,
965 * then start taking the whole group down immediately.
966 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -0700967 if (sig_fatal(p, sig) &&
Oleg Nesterov4d53eb42017-11-17 15:30:08 -0800968 !(signal->flags & SIGNAL_GROUP_EXIT) &&
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700969 !sigismember(&t->real_blocked, sig) &&
Oleg Nesterov4d53eb42017-11-17 15:30:08 -0800970 (sig == SIGKILL || !p->ptrace)) {
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700971 /*
972 * This signal will be fatal to the whole group.
973 */
974 if (!sig_kernel_coredump(sig)) {
975 /*
976 * Start a group exit and wake everybody up.
977 * This way we don't have other threads
978 * running and doing things after a slower
979 * thread has the fatal signal pending.
980 */
981 signal->flags = SIGNAL_GROUP_EXIT;
982 signal->group_exit_code = sig;
983 signal->group_stop_count = 0;
984 t = p;
985 do {
Tejun Heo6dfca322011-06-02 11:14:00 +0200986 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700987 sigaddset(&t->pending.signal, SIGKILL);
988 signal_wake_up(t, 1);
989 } while_each_thread(p, t);
990 return;
991 }
992 }
993
994 /*
995 * The signal is already in the shared-pending queue.
996 * Tell the chosen thread to wake up and dequeue it.
997 */
998 signal_wake_up(t, sig == SIGKILL);
999 return;
1000}
1001
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001002static inline int legacy_queue(struct sigpending *signals, int sig)
1003{
1004 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1005}
1006
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001007#ifdef CONFIG_USER_NS
1008static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1009{
1010 if (current_user_ns() == task_cred_xxx(t, user_ns))
1011 return;
1012
1013 if (SI_FROMKERNEL(info))
1014 return;
1015
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001016 rcu_read_lock();
1017 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1018 make_kuid(current_user_ns(), info->si_uid));
1019 rcu_read_unlock();
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001020}
1021#else
1022static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1023{
1024 return;
1025}
1026#endif
1027
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001028static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1029 int group, int from_ancestor_ns)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030{
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001031 struct sigpending *pending;
Oleg Nesterov6e65acb2008-04-30 00:52:50 -07001032 struct sigqueue *q;
Vegard Nossum7a0aeb12009-05-16 11:28:33 +02001033 int override_rlimit;
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001034 int ret = 0, result;
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04001035
Oleg Nesterov6e65acb2008-04-30 00:52:50 -07001036 assert_spin_locked(&t->sighand->siglock);
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001037
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001038 result = TRACE_SIGNAL_IGNORED;
Oleg Nesterov629d3622012-03-23 15:02:44 -07001039 if (!prepare_signal(sig, t,
Eric W. Biedermanba277fe2018-09-03 20:02:46 +02001040 from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001041 goto ret;
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001042
1043 pending = group ? &t->signal->shared_pending : &t->pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 /*
Pavel Emelyanov2acb0242008-04-30 00:52:35 -07001045 * Short-circuit ignored signals and support queuing
1046 * exactly one non-rt signal, so that we can get more
1047 * detailed information about the cause of the signal.
1048 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001049 result = TRACE_SIGNAL_ALREADY_PENDING;
Oleg Nesterov7e695a52008-04-30 00:52:59 -07001050 if (legacy_queue(pending, sig))
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001051 goto ret;
1052
1053 result = TRACE_SIGNAL_DELIVERED;
Davide Libenzifba2afa2007-05-10 22:23:13 -07001054 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 * fast-pathed signals for kernel-internal things like SIGSTOP
1056 * or SIGKILL.
1057 */
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001058 if (info == SEND_SIG_FORCED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 goto out_set;
1060
Randy Dunlap5aba0852011-04-04 14:59:31 -07001061 /*
1062 * Real-time signals must be queued if sent by sigqueue, or
1063 * some other real-time mechanism. It is implementation
1064 * defined whether kill() does so. We attempt to do so, on
1065 * the principle of least surprise, but since kill is not
1066 * allowed to fail with EAGAIN when low on memory we just
1067 * make sure at least one signal gets delivered and don't
1068 * pass on the info struct.
1069 */
Vegard Nossum7a0aeb12009-05-16 11:28:33 +02001070 if (sig < SIGRTMIN)
1071 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1072 else
1073 override_rlimit = 0;
1074
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001075 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
Vegard Nossum7a0aeb12009-05-16 11:28:33 +02001076 override_rlimit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077 if (q) {
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001078 list_add_tail(&q->list, &pending->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 switch ((unsigned long) info) {
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001080 case (unsigned long) SEND_SIG_NOINFO:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 q->info.si_signo = sig;
1082 q->info.si_errno = 0;
1083 q->info.si_code = SI_USER;
Sukadev Bhattiprolu9cd4fd12009-01-06 14:42:46 -08001084 q->info.si_pid = task_tgid_nr_ns(current,
Sukadev Bhattiprolu09bca052009-01-06 14:42:45 -08001085 task_active_pid_ns(t));
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001086 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 break;
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001088 case (unsigned long) SEND_SIG_PRIV:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 q->info.si_signo = sig;
1090 q->info.si_errno = 0;
1091 q->info.si_code = SI_KERNEL;
1092 q->info.si_pid = 0;
1093 q->info.si_uid = 0;
1094 break;
1095 default:
1096 copy_siginfo(&q->info, info);
Sukadev Bhattiprolu6588c1e2009-04-02 16:58:09 -07001097 if (from_ancestor_ns)
1098 q->info.si_pid = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 break;
1100 }
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001101
1102 userns_fixup_signal_uid(&q->info, t);
1103
Oleg Nesterov621d3122005-10-30 15:03:45 -08001104 } else if (!is_si_special(info)) {
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001105 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1106 /*
1107 * Queue overflow, abort. We may abort if the
1108 * signal was rt and sent by user using something
1109 * other than kill().
1110 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001111 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1112 ret = -EAGAIN;
1113 goto ret;
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001114 } else {
1115 /*
1116 * This is a silent loss of information. We still
1117 * send the signal, but the *info bits are lost.
1118 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001119 result = TRACE_SIGNAL_LOSE_INFO;
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001120 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 }
1122
1123out_set:
Oleg Nesterov53c30332008-04-30 00:53:00 -07001124 signalfd_notify(t, sig);
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001125 sigaddset(&pending->signal, sig);
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001126 complete_signal(sig, t, group);
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001127ret:
1128 trace_signal_generate(sig, info, t, group, result);
1129 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130}
1131
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001132static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1133 int group)
1134{
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001135 int from_ancestor_ns = 0;
1136
1137#ifdef CONFIG_PID_NS
Oleg Nesterovdd342002009-12-15 16:47:24 -08001138 from_ancestor_ns = si_fromuser(info) &&
1139 !task_pid_nr_ns(current, task_active_pid_ns(t));
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001140#endif
1141
1142 return __send_signal(sig, info, t, group, from_ancestor_ns);
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001143}
1144
Al Viro4aaefee2012-11-05 13:09:56 -05001145static void print_fatal_signal(int signr)
Ingo Molnar45807a12007-07-15 23:40:10 -07001146{
Al Viro4aaefee2012-11-05 13:09:56 -05001147 struct pt_regs *regs = signal_pt_regs();
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001148 pr_info("potentially unexpected fatal signal %d.\n", signr);
Ingo Molnar45807a12007-07-15 23:40:10 -07001149
Al Viroca5cd872007-10-29 04:31:16 +00001150#if defined(__i386__) && !defined(__arch_um__)
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001151 pr_info("code at %08lx: ", regs->ip);
Ingo Molnar45807a12007-07-15 23:40:10 -07001152 {
1153 int i;
1154 for (i = 0; i < 16; i++) {
1155 unsigned char insn;
1156
Andi Kleenb45c6e72010-01-08 14:42:52 -08001157 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1158 break;
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001159 pr_cont("%02x ", insn);
Ingo Molnar45807a12007-07-15 23:40:10 -07001160 }
1161 }
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001162 pr_cont("\n");
Ingo Molnar45807a12007-07-15 23:40:10 -07001163#endif
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001164 preempt_disable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001165 show_regs(regs);
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001166 preempt_enable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001167}
1168
1169static int __init setup_print_fatal_signals(char *str)
1170{
1171 get_option (&str, &print_fatal_signals);
1172
1173 return 1;
1174}
1175
1176__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001178int
1179__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1180{
1181 return send_signal(sig, info, p, 1);
1182}
1183
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184static int
1185specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1186{
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001187 return send_signal(sig, info, t, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188}
1189
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001190int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1191 bool group)
1192{
1193 unsigned long flags;
1194 int ret = -ESRCH;
1195
1196 if (lock_task_sighand(p, &flags)) {
1197 ret = send_signal(sig, info, p, group);
1198 unlock_task_sighand(p, &flags);
1199 }
1200
1201 return ret;
1202}
1203
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204/*
1205 * Force a signal that the process can't ignore: if necessary
1206 * we unblock the signal and change any SIG_IGN to SIG_DFL.
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001207 *
1208 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1209 * since we do not want to have a signal handler that was blocked
1210 * be invoked when user space had explicitly blocked it.
1211 *
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001212 * We don't want to have recursive SIGSEGV's etc, for example,
1213 * that is why we also clear SIGNAL_UNKILLABLE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215int
1216force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1217{
1218 unsigned long int flags;
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001219 int ret, blocked, ignored;
1220 struct k_sigaction *action;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
1222 spin_lock_irqsave(&t->sighand->siglock, flags);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001223 action = &t->sighand->action[sig-1];
1224 ignored = action->sa.sa_handler == SIG_IGN;
1225 blocked = sigismember(&t->blocked, sig);
1226 if (blocked || ignored) {
1227 action->sa.sa_handler = SIG_DFL;
1228 if (blocked) {
1229 sigdelset(&t->blocked, sig);
Roland McGrath7bb44ad2007-05-23 13:57:44 -07001230 recalc_sigpending_and_wake(t);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001231 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 }
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001233 if (action->sa.sa_handler == SIG_DFL)
1234 t->signal->flags &= ~SIGNAL_UNKILLABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 ret = specific_send_sig_info(sig, info, t);
1236 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1237
1238 return ret;
1239}
1240
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241/*
1242 * Nuke all other threads in the group.
1243 */
Oleg Nesterov09faef12010-05-26 14:43:11 -07001244int zap_other_threads(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245{
Oleg Nesterov09faef12010-05-26 14:43:11 -07001246 struct task_struct *t = p;
1247 int count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 p->signal->group_stop_count = 0;
1250
Oleg Nesterov09faef12010-05-26 14:43:11 -07001251 while_each_thread(p, t) {
Tejun Heo6dfca322011-06-02 11:14:00 +02001252 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
Oleg Nesterov09faef12010-05-26 14:43:11 -07001253 count++;
1254
1255 /* Don't bother with already dead threads */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 if (t->exit_state)
1257 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 sigaddset(&t->pending.signal, SIGKILL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 signal_wake_up(t, 1);
1260 }
Oleg Nesterov09faef12010-05-26 14:43:11 -07001261
1262 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263}
1264
Namhyung Kimb8ed3742010-10-27 15:34:06 -07001265struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1266 unsigned long *flags)
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001267{
1268 struct sighand_struct *sighand;
1269
1270 for (;;) {
Paul E. McKenneyc41247e2014-05-05 08:18:30 -07001271 /*
1272 * Disable interrupts early to avoid deadlocks.
1273 * See rcu_read_unlock() comment header for details.
1274 */
Paul E. McKenneya8417962011-07-19 03:25:36 -07001275 local_irq_save(*flags);
1276 rcu_read_lock();
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001277 sighand = rcu_dereference(tsk->sighand);
Paul E. McKenneya8417962011-07-19 03:25:36 -07001278 if (unlikely(sighand == NULL)) {
1279 rcu_read_unlock();
1280 local_irq_restore(*flags);
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001281 break;
Paul E. McKenneya8417962011-07-19 03:25:36 -07001282 }
Oleg Nesterov392809b2014-09-28 23:44:18 +02001283 /*
1284 * This sighand can be already freed and even reused, but
1285 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1286 * initializes ->siglock: this slab can't go away, it has
1287 * the same object type, ->siglock can't be reinitialized.
1288 *
1289 * We need to ensure that tsk->sighand is still the same
1290 * after we take the lock, we can race with de_thread() or
1291 * __exit_signal(). In the latter case the next iteration
1292 * must see ->sighand == NULL.
1293 */
Paul E. McKenneya8417962011-07-19 03:25:36 -07001294 spin_lock(&sighand->siglock);
1295 if (likely(sighand == tsk->sighand)) {
1296 rcu_read_unlock();
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001297 break;
Paul E. McKenneya8417962011-07-19 03:25:36 -07001298 }
1299 spin_unlock(&sighand->siglock);
1300 rcu_read_unlock();
1301 local_irq_restore(*flags);
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001302 }
1303
1304 return sighand;
1305}
1306
David Howellsc69e8d92008-11-14 10:39:19 +11001307/*
1308 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001309 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1311{
David Howells694f6902010-08-04 16:59:14 +01001312 int ret;
1313
1314 rcu_read_lock();
1315 ret = check_kill_permission(sig, info, p);
1316 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001318 if (!ret && sig)
1319 ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321 return ret;
1322}
1323
1324/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001325 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001327 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001329int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330{
1331 struct task_struct *p = NULL;
1332 int retval, success;
1333
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 success = 0;
1335 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001336 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 int err = group_send_sig_info(sig, info, p);
1338 success |= !err;
1339 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001340 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 return success ? 0 : retval;
1342}
1343
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001344int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
Oleg Nesterovd36174b2008-02-08 04:19:18 -08001346 int error = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 struct task_struct *p;
1348
Paul E. McKenneyeca1a082014-10-23 11:41:22 -07001349 for (;;) {
1350 rcu_read_lock();
1351 p = pid_task(pid, PIDTYPE_PID);
1352 if (p)
1353 error = group_send_sig_info(sig, info, p);
1354 rcu_read_unlock();
1355 if (likely(!p || error != -ESRCH))
1356 return error;
Oleg Nesterov6ca25b52008-04-30 00:52:45 -07001357
Paul E. McKenneyeca1a082014-10-23 11:41:22 -07001358 /*
1359 * The task was unhashed in between, try again. If it
1360 * is dead, pid_task() will return NULL, if we race with
1361 * de_thread() it will find the new leader.
1362 */
1363 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364}
1365
Randy Dunlap5aba0852011-04-04 14:59:31 -07001366int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001367{
1368 int error;
1369 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001370 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001371 rcu_read_unlock();
1372 return error;
1373}
1374
Serge Hallynd178bc32011-09-26 10:45:18 -05001375static int kill_as_cred_perm(const struct cred *cred,
1376 struct task_struct *target)
1377{
1378 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001379 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1380 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001381 return 0;
1382 return 1;
1383}
1384
Eric W. Biederman2425c082006-10-02 02:17:28 -07001385/* like kill_pid_info(), but doesn't use uid/euid of "current" */
Serge Hallynd178bc32011-09-26 10:45:18 -05001386int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1387 const struct cred *cred, u32 secid)
Harald Welte46113832005-10-10 19:44:29 +02001388{
1389 int ret = -EINVAL;
1390 struct task_struct *p;
Thomas Gleixner14d8c9f2009-12-10 00:53:17 +00001391 unsigned long flags;
Harald Welte46113832005-10-10 19:44:29 +02001392
1393 if (!valid_signal(sig))
1394 return ret;
1395
Thomas Gleixner14d8c9f2009-12-10 00:53:17 +00001396 rcu_read_lock();
Eric W. Biederman2425c082006-10-02 02:17:28 -07001397 p = pid_task(pid, PIDTYPE_PID);
Harald Welte46113832005-10-10 19:44:29 +02001398 if (!p) {
1399 ret = -ESRCH;
1400 goto out_unlock;
1401 }
Serge Hallynd178bc32011-09-26 10:45:18 -05001402 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
Harald Welte46113832005-10-10 19:44:29 +02001403 ret = -EPERM;
1404 goto out_unlock;
1405 }
David Quigley8f95dc52006-06-30 01:55:47 -07001406 ret = security_task_kill(p, info, sig, secid);
1407 if (ret)
1408 goto out_unlock;
Thomas Gleixner14d8c9f2009-12-10 00:53:17 +00001409
1410 if (sig) {
1411 if (lock_task_sighand(p, &flags)) {
1412 ret = __send_signal(sig, info, p, 1, 0);
1413 unlock_task_sighand(p, &flags);
1414 } else
1415 ret = -ESRCH;
Harald Welte46113832005-10-10 19:44:29 +02001416 }
1417out_unlock:
Thomas Gleixner14d8c9f2009-12-10 00:53:17 +00001418 rcu_read_unlock();
Harald Welte46113832005-10-10 19:44:29 +02001419 return ret;
1420}
Serge Hallynd178bc32011-09-26 10:45:18 -05001421EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423/*
1424 * kill_something_info() interprets pid in interesting ways just like kill(2).
1425 *
1426 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1427 * is probably wrong. Should make it like BSD or SYSV.
1428 */
1429
Gustavo Fernando Padovanbc64efd2008-07-25 01:47:33 -07001430static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431{
Eric W. Biederman8d42db182007-02-12 00:52:55 -08001432 int ret;
Pavel Emelyanovd5df7632008-02-08 04:19:22 -08001433
1434 if (pid > 0) {
1435 rcu_read_lock();
1436 ret = kill_pid_info(sig, info, find_vpid(pid));
1437 rcu_read_unlock();
1438 return ret;
1439 }
1440
zhongjiangec1975a2017-07-10 15:52:57 -07001441 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1442 if (pid == INT_MIN)
1443 return -ESRCH;
1444
Pavel Emelyanovd5df7632008-02-08 04:19:22 -08001445 read_lock(&tasklist_lock);
1446 if (pid != -1) {
1447 ret = __kill_pgrp_info(sig, info,
1448 pid ? find_vpid(-pid) : task_pgrp(current));
1449 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 int retval = 0, count = 0;
1451 struct task_struct * p;
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 for_each_process(p) {
Sukadev Bhattiprolud25141a2008-10-29 14:01:11 -07001454 if (task_pid_vnr(p) > 1 &&
1455 !same_thread_group(p, current)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 int err = group_send_sig_info(sig, info, p);
1457 ++count;
1458 if (err != -EPERM)
1459 retval = err;
1460 }
1461 }
Eric W. Biederman8d42db182007-02-12 00:52:55 -08001462 ret = count ? retval : -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 }
Pavel Emelyanovd5df7632008-02-08 04:19:22 -08001464 read_unlock(&tasklist_lock);
1465
Eric W. Biederman8d42db182007-02-12 00:52:55 -08001466 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467}
1468
1469/*
1470 * These are for backward compatibility with the rest of the kernel source.
1471 */
1472
Randy Dunlap5aba0852011-04-04 14:59:31 -07001473int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 /*
1476 * Make sure legacy kernel users don't send in bad values
1477 * (normal paths check this in check_kill_permission).
1478 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001479 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 return -EINVAL;
1481
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001482 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483}
1484
/*
 * Translate the legacy senders' "priv" flag into the matching special
 * siginfo cookie: SEND_SIG_PRIV for kernel-internal signals,
 * SEND_SIG_NOINFO for plain user-style ones.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1487
int
send_sig(int sig, struct task_struct *p, int priv)
{
	/* Map the priv flag onto the corresponding special siginfo cookie. */
	struct siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
1493
/*
 * Force-deliver @sig to @p via force_sig_info(); SEND_SIG_PRIV marks the
 * signal as generated by the kernel rather than by another process.
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1499
1500/*
1501 * When things go south during signal handling, we
1502 * will force a SIGSEGV. And if the signal that caused
1503 * the problem was already a SIGSEGV, we'll want to
1504 * make sure we don't even try to deliver the signal..
1505 */
1506int
1507force_sigsegv(int sig, struct task_struct *p)
1508{
1509 if (sig == SIGSEGV) {
1510 unsigned long flags;
1511 spin_lock_irqsave(&p->sighand->siglock, flags);
1512 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1513 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1514 }
1515 force_sig(SIGSEGV, p);
1516 return 0;
1517}
1518
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001519int kill_pgrp(struct pid *pid, int sig, int priv)
1520{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001521 int ret;
1522
1523 read_lock(&tasklist_lock);
1524 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1525 read_unlock(&tasklist_lock);
1526
1527 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001528}
1529EXPORT_SYMBOL(kill_pgrp);
1530
int kill_pid(struct pid *pid, int sig, int priv)
{
	/* Convert the priv flag, then defer to the pid-based sender. */
	struct siginfo *si = __si_special(priv);

	return kill_pid_info(sig, si, pid);
}
1535EXPORT_SYMBOL(kill_pid);
1536
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537/*
1538 * These functions support sending signals using preallocated sigqueue
1539 * structures. This is needed "because realtime applications cannot
1540 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001541 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 * we allocate the sigqueue structure from the timer_create. If this
1543 * allocation fails we are able to report the failure to the application
1544 * with an EAGAIN error.
1545 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546struct sigqueue *sigqueue_alloc(void)
1547{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001548 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001550 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001552
1553 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
1555
/*
 * Release a sigqueue entry obtained from sigqueue_alloc().  If the entry
 * is still queued on a pending list, only the PREALLOC flag is cleared
 * here; the actual free then happens when the signal is dequeued.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1580
/*
 * Queue a preallocated sigqueue entry to @t (thread-directed) or to
 * @t's group (@group != 0).
 *
 * Returns 0 if queued, 1 if the signal is ignored, -1 if @t has no
 * sighand anymore (is exiting).
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	/* Freshly queued: no overruns accumulated yet. */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	/* Trace the outcome while still holding the sighand lock. */
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1624
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 * Let a parent know about the death of a child.
1627 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001628 *
Oleg Nesterov53c8f9f2011-06-22 23:08:18 +02001629 * Returns true if our parent ignored us and so we've switched to
1630 * self-reaping.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead.  */
 	BUG_ON(task_is_stopped_or_traced(tsk));

	/* Only a (traced or) group leader with no live threads may report death. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	/* pid/uid are translated into the parent's namespaces. */
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/*
	 * Report this thread's time plus what the thread group has
	 * already accumulated in signal_struct.
	 */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/*
	 * Decode exit_code: 0x80 = core dumped, low 7 bits = killing
	 * signal, otherwise a normal exit with status in bits 8..15.
	 */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1721
Tejun Heo75b95952011-03-23 10:37:01 +01001722/**
1723 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1724 * @tsk: task reporting the state change
1725 * @for_ptracer: the notification is for ptracer
1726 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1727 *
1728 * Notify @tsk's parent that the stopped/continued state has changed. If
1729 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1730 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1731 *
1732 * CONTEXT:
1733 * Must be called with tasklist_lock at least read locked.
1734 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		/* Group-stop notifications come from the leader to its real parent. */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	/* Unlike do_notify_parent(), only this task's own times are reported. */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	/* si_status carries the stop/continue reason for the wait*() caller. */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* SA_NOCLDSTOP suppresses SIGCHLD for stop/continue events. */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1791
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001792static inline int may_ptrace_stop(void)
1793{
Tejun Heod21142e2011-06-17 16:50:34 +02001794 if (!likely(current->ptrace))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001795 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001796 /*
1797 * Are we in the middle of do_coredump?
1798 * If so and our tracer is also part of the coredump stopping
1799 * is a deadlock situation, and pointless because our tracer
1800 * is dead so don't allow us to stop.
1801 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001802 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001803 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01001804 *
1805 * This is almost outdated, a task with the pending SIGKILL can't
1806 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1807 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001808 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001809 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001810 unlikely(current->mm == current->parent->mm))
1811 return 0;
1812
1813 return 1;
1814}
1815
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001817 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001818 * Called with the siglock held.
1819 */
1820static int sigkill_pending(struct task_struct *tsk)
1821{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001822 return sigismember(&tsk->pending.signal, SIGKILL) ||
1823 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001824}
1825
1826/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 * This must be called with current->sighand->siglock held.
1828 *
1829 * This should be the path for all ptrace stops.
1830 * We always set current->last_siginfo while stopped here.
1831 * That makes it a way to test a stopped process for
1832 * being ptrace-stopped vs being job-control-stopped.
1833 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001834 * If we actually decide not to stop at all because the tracer
1835 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	/* Set if our trap also completed an in-progress group stop. */
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
1958
Tejun Heo3544d722011-06-14 11:20:15 +02001959static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960{
1961 siginfo_t info;
1962
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 memset(&info, 0, sizeof info);
Tejun Heo3544d722011-06-14 11:20:15 +02001964 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001966 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001967 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
1969 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02001970 ptrace_stop(exit_code, why, 1, &info);
1971}
1972
void ptrace_notify(int exit_code)
{
	/* Callers must encode SIGTRAP in the low bits of exit_code. */
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	/* Run any queued task_work first — presumably before we may sleep
	 * in the trap below; confirm against task_work callers. */
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
1983
Tejun Heo73ddff22011-06-14 11:20:14 +02001984/**
1985 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1986 * @signr: signr causing group stop if initiating
1987 *
1988 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1989 * and participate in it. If already set, participate in the existing
1990 * group stop. If participated in a group stop (and thus slept), %true is
1991 * returned with siglock released.
1992 *
1993 * If ptraced, this function doesn't handle stop itself. Instead,
1994 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1995 * untouched. The caller must ensure that INTERRUPT trap handling takes
1996 * places afterwards.
1997 *
1998 * CONTEXT:
1999 * Must be called with @current->sighand->siglock held, which is released
2000 * on %true return.
2001 *
2002 * RETURNS:
2003 * %false if group stop is already cancelled or ptrace trap is scheduled.
2004 * %true if participated in group stop.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		/* Bail if the stop was cancelled or the group is exiting. */
		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		/* Count ourselves in if we still need to consume the stop. */
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				/* SEIZED tasks trap via STOP trap, not wakeup. */
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002107
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop in progress and not already stopped:
		 * this is an explicit SEIZE/INTERRUPT trap, so report
		 * %SIGTRAP instead of a stop signal.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		/* don't leave a stale stop signal in exit_code */
		current->exit_code = 0;
	}
}
2141
/*
 * Report a dequeued signal to the tracer and let it intercept, cancel or
 * replace it.  Stops in ptrace_stop() and, once resumed, returns the signal
 * number the debugger decided on: 0 if the signal was cancelled (or was
 * requeued because it is now blocked), otherwise the possibly-changed
 * signal number, with @info updated to match.
 *
 * Called with @current->sighand->siglock held; ptrace_stop() drops and
 * re-acquires it around the sleep.
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		/* parent may change under us; RCU protects the deref */
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2189
/*
 * Pick the next signal to deliver to the current task.
 *
 * Handles pending job-control state first (CLD_* notifications to the
 * parent, group stops, ptrace jobctl traps), then dequeues a signal,
 * giving synchronous (instruction-generated) signals priority.  Fatal
 * signals never return to the caller: they end in do_coredump()/
 * do_group_exit().  On return, @ksig is filled with the signal and the
 * action to run; returns true iff a user handler should be invoked.
 */
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	/* uprobes may want the signal delayed until single-step is done */
	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* SIGKILL is never shown to the tracer */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2394
/**
 * signal_delivered - commit successful delivery of a signal
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
2421
Al Viro2ce5da12012-11-07 15:11:25 -05002422void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2423{
2424 if (failed)
2425 force_sigsegv(ksig->sig, current);
2426 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002427 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002428}
2429
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 *
 * Walks the thread group and wakes each live thread that can handle one
 * of the signals in @which, narrowing the set as threads claim signals.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	/* only care about shared pending signals that are in @which */
	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* stop early once every signal has a taker */
		if (sigisemptyset(&retarget))
			break;
	}
}
2461
/*
 * Called on the exit path before @tsk sets PF_EXITING.  Hands any shared
 * pending signals this thread could have handled over to other live
 * threads, and if this thread was the last participant of a pending group
 * stop, delivers the CLD_STOPPED notification to the parent.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* retarget the shared signals this thread was not blocking */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2511
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512EXPORT_SYMBOL(recalc_sigpending);
2513EXPORT_SYMBOL_GPL(dequeue_signal);
2514EXPORT_SYMBOL(flush_signals);
2515EXPORT_SYMBOL(force_sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516EXPORT_SYMBOL(send_sig);
2517EXPORT_SYMBOL(send_sig_info);
2518EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
2520/*
2521 * System call entry points.
2522 */
2523
/**
 * sys_restart_syscall - restart a system call
 *
 * Invoked when a signal handler returns after a syscall was interrupted
 * with -ERESTART_RESTARTBLOCK; dispatches to the restart function the
 * interrupted syscall recorded in current->restart_block.
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}
2532
/*
 * Default restart_block handler for syscalls that must not be restarted:
 * just report the interruption to the caller.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2537
/*
 * Install *@newset as @tsk's blocked mask.  Before doing so, retarget any
 * shared pending signals that are about to become blocked for @tsk so
 * another thread in the group can take them.
 *
 * Called with @tsk's siglock held.
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2549
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 *
 * SIGKILL and SIGSTOP are stripped from @newset: they can never be blocked.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2562
/*
 * Like set_current_blocked() but does not filter out SIGKILL/SIGSTOP;
 * takes the siglock and delegates to __set_task_blocked().
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 *
 * @how is SIG_BLOCK, SIG_UNBLOCK or SIG_SETMASK; @oldset, if non-NULL,
 * receives the previous mask.  Returns 0 or -EINVAL for a bad @how.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
2613
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: stores pending signals
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		/* userspace can never block SIGKILL or SIGSTOP */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
2650
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigprocmask.  On big-endian the 32-bit sigset
 * layout differs from the native one, so convert via sigset_from_compat()/
 * sigset_to_compat(); on little-endian the layouts match and we can call
 * the native syscall directly.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* userspace can never block SIGKILL or SIGSTOP */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
Al Viro322a56c2012-12-25 13:32:58 -05002689
/*
 * Compute the set of pending-but-blocked signals for the current task
 * (private and shared queues combined) into *@set.  @sigsetsize is the
 * caller-supplied sigset size; larger than sigset_t is rejected.
 * Returns 0 or -EINVAL.
 */
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
2719
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigpending.  Big-endian needs the 32-bit sigset
 * conversion via sigset_to_compat(); little-endian layouts match, so the
 * native syscall is called directly.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002740
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

/*
 * Copy a siginfo_t to userspace, writing only the generic three ints
 * (si_signo, si_errno, si_code) plus the union members selected by the
 * __SI_* class encoded in si_code.  Kernel-internal padding is never
 * copied out.  A negative si_code means the info came from userspace
 * (e.g. sigqueue), so the whole structure is copied verbatim.
 *
 * Returns 0 on success or -EFAULT if any userspace write failed.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		/* MPX bound-violation faults carry the violated bounds. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		/* Protection-key faults carry the faulting pkey. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2832
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 *
 * Returns the dequeued signal number on success, -EINVAL for an invalid
 * timespec, -EAGAIN if the timeout expired (or was zero) with no signal
 * pending, and -EINTR if the sleep was interrupted by some other signal.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	/* timeout.tv64 == 0 means a pure poll: don't sleep at all. */
	if (!sig && timeout.tv64) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/* Restore the original blocked mask before dequeueing again. */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	/* ret != 0 means the hrtimer sleep was interrupted. */
	return ret ? -EINTR : -EAGAIN;
}
2888
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	/* Positive return is the dequeued signal number; hand back its info. */
	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
2927
Randy Dunlap41c57892011-04-04 15:00:26 -07002928/**
2929 * sys_kill - send a signal to a process
2930 * @pid: the PID of the process
2931 * @sig: signal to be sent
2932 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002933SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934{
2935 struct siginfo info;
2936
2937 info.si_signo = sig;
2938 info.si_errno = 0;
2939 info.si_code = SI_USER;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002940 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08002941 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942
2943 return kill_something_info(sig, &info, pid);
2944}
2945
/*
 * Deliver @sig with @info to the thread with virtual PID @pid, optionally
 * verifying that it belongs to thread group @tgid (tgid <= 0 skips the
 * check).  Returns 0 on success, -ESRCH if no such task, or the error
 * from the permission check / delivery.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU keeps the task_struct alive across the lookup and send. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2975
/*
 * Common worker for tkill()/tgkill(): build an SI_TKILL siginfo stamped
 * with the caller's tgid/uid and hand it to do_send_specific().
 */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	/* Zero-init: don't let uninitialized stack bytes reach the target. */
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
2988
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989/**
2990 * sys_tgkill - send signal to one specific thread
2991 * @tgid: the thread group ID of the thread
2992 * @pid: the PID of the thread
2993 * @sig: signal to be sent
2994 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08002995 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 * exists but it's not belonging to the target process anymore. This
2997 * method solves the problem of threads exiting and PIDs getting reused.
2998 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002999SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 /* This is only valid for single tasks */
3002 if (pid <= 0 || tgid <= 0)
3003 return -EINVAL;
3004
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003005 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006}
3007
Randy Dunlap41c57892011-04-04 15:00:26 -07003008/**
3009 * sys_tkill - send signal to one specific task
3010 * @pid: the PID of the task
3011 * @sig: signal to be sent
3012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3014 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003015SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 /* This is only valid for single tasks */
3018 if (pid <= 0)
3019 return -EINVAL;
3020
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003021 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022}
3023
/*
 * Worker for rt_sigqueueinfo(): validate that the caller is not forging
 * kernel-originated siginfo, then queue it for process @pid.
 */
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* The signal number in the siginfo must match the one being sent. */
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
3038
Randy Dunlap41c57892011-04-04 15:00:26 -07003039/**
3040 * sys_rt_sigqueueinfo - send signal information to a signal
3041 * @pid: the PID of the thread
3042 * @sig: signal to be sent
3043 * @uinfo: signal info to be sent
3044 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003045SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3046 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047{
3048 siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3050 return -EFAULT;
Al Viro75907d42012-12-25 15:19:12 -05003051 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052}
3053
#ifdef CONFIG_COMPAT
/*
 * Compat entry point: translate the 32-bit siginfo into the native
 * layout, then share the native worker.
 */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init: the compat copy may not fill every native byte. */
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
Al Viro75907d42012-12-25 15:19:12 -05003067
/*
 * Worker for rt_tgsigqueueinfo(): like do_rt_sigqueueinfo() but targets
 * one specific thread (@pid) inside thread group @tgid.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* The signal number in the siginfo must match the one being sent. */
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3085
3086SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3087 siginfo_t __user *, uinfo)
3088{
3089 siginfo_t info;
3090
3091 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3092 return -EFAULT;
3093
3094 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3095}
3096
#ifdef CONFIG_COMPAT
/*
 * Compat entry point: translate the 32-bit siginfo, then share the
 * native worker.
 */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init: the compat copy may not fill every native byte. */
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3111
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 *
 * Install @action as the handler for @sig in the current (kernel)
 * thread.  When the signal is being set to SIG_IGN, any already-queued
 * instances are flushed, mirroring the POSIX rule that ignoring a
 * signal discards pending occurrences.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		/* Drop pending instances from both shared and private queues. */
		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003132
/*
 * Weak no-op default; architectures that need to fix up sigaction
 * state for compat tasks override this.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3137
/*
 * Core of sigaction(2): read and/or replace the action for @sig under
 * siglock.  @act and @oact may each be NULL.  Returns 0 or -EINVAL for
 * a bad signal number or an attempt to change SIGKILL/SIGSTOP.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* SIGKILL/SIGSTOP may be queried (act == NULL) but never changed. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Give the architecture a chance to adjust for compat tasks. */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be masked by a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush the shared queue and every thread's private one. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3182
/*
 * Core of sigaltstack(2): report the current alternate signal stack in
 * *@uoss and/or install a new one from *@uss.  @sp is the caller's
 * current stack pointer (used to refuse changes while running on the
 * alternate stack); @min_ss_size is the architecture minimum (native
 * MINSIGSTKSZ or COMPAT_MINSIGSTKSZ).
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
		size_t min_ss_size)
{
	stack_t oss;
	int error;

	/* Snapshot the old stack before any modification. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Can't change the stack we are currently executing on. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		/* Mode bits with SS_AUTODISARM-style flag bits stripped. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
/* sigaltstack(2): native entry point, enforcing the native MINSIGSTKSZ. */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
			      MINSIGSTKSZ);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
Al Viro5c495742012-11-18 15:29:16 -05003252int restore_altstack(const stack_t __user *uss)
3253{
Will Deacon1e7066a2018-09-05 15:34:42 +01003254 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
3255 MINSIGSTKSZ);
Al Viro5c495742012-11-18 15:29:16 -05003256 /* squash all but EFAULT for now */
3257 return err == -EFAULT ? err : 0;
3258}
3259
/*
 * Save the current alternate-stack settings into a user signal frame,
 * then honour SS_AUTODISARM by resetting the stack so nested signals
 * don't reuse it.  @sp is unused here; kept for arch call sites.
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3272
#ifdef CONFIG_COMPAT
/*
 * Compat sigaltstack: widen the 32-bit stack_t, call the native worker
 * on kernel copies (under set_fs(KERNEL_DS) so the __user-annotated
 * worker accepts kernel pointers), then narrow the old stack back to
 * the 32-bit layout for userspace.
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3308
/* Compat counterpart of restore_altstack(): used on compat sigreturn. */
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
Al Viroc40702c2012-11-20 14:24:26 -05003315
/*
 * Compat counterpart of __save_altstack(): store the current alternate
 * stack in 32-bit form, then apply SS_AUTODISARM if requested.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	/*
	 * Legal cast: with sigsetsize == sizeof(old_sigset_t) only the
	 * first word of the set is ever copied to userspace.
	 */
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
3344
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	/* Snapshot the old mask (first word only, legacy single-word ABI). */
	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			/* Only the first word is replaced; upper words keep. */
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3395
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal to be sent
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigaction: field-by-field translation between the 32-bit
 * struct compat_sigaction (compat pointers, compat sigset) and the
 * native k_sigaction, around a call to do_sigaction().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* Accumulate errors; any non-zero result becomes -EFAULT. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003477#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478
Al Viro495dfbf2012-12-25 19:09:45 -05003479#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/*
		 * A single access_ok() check covers the whole struct, so
		 * the cheaper __get_user() variants can read each field;
		 * the || chain stops at the first failing access.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* The old ABI carries only the low word of the mask. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Write the previous action back in old_sigaction layout. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3514#endif
3515#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/*
		 * One access_ok() check, then cheap __get_user() reads;
		 * the || chain fails fast on the first faulting access.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Compat pointers are 32-bit values; widen to kernel pointers. */
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* Old ABI: only the low word of the signal mask exists. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Report the previous action back in the compat layout. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3555#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556
Fabian Frederickf6187762014-06-04 16:11:12 -07003557#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558
3559/*
3560 * For backwards compatibility. Functionality superseded by sigprocmask.
3561 */
/*
 * sys_sgetmask - return the low word of the current blocked-signal mask.
 * Reads a single word of current->blocked, hence the "SMP safe" note.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
3567
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003568SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003570 int old = current->blocked.sig[0];
3571 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003573 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003574 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
3576 return old;
3577}
Fabian Frederickf6187762014-06-04 16:11:12 -07003578#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579
3580#ifdef __ARCH_WANT_SYS_SIGNAL
3581/*
3582 * For backwards compatibility. Functionality superseded by sigaction.
3583 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003584SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585{
3586 struct k_sigaction new_sa, old_sa;
3587 int ret;
3588
3589 new_sa.sa.sa_handler = handler;
3590 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003591 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
3593 ret = do_sigaction(sig, &new_sa, &old_sa);
3594
3595 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3596}
3597#endif /* __ARCH_WANT_SYS_SIGNAL */
3598
3599#ifdef __ARCH_WANT_SYS_PAUSE
3600
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003601SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003603 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003604 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003605 schedule();
3606 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 return -ERESTARTNOHAND;
3608}
3609
3610#endif
3611
/*
 * Common worker for the sigsuspend family: temporarily install *set as
 * the blocked mask, sleep until a signal is pending, then arrange for
 * the previous mask to be restored on return from the signal handler.
 */
static int sigsuspend(sigset_t *set)
{
	/* Save the old mask BEFORE installing the new one. */
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	/* Sleep until a signal is actually pending for this task. */
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* Have the signal-return path restore saved_sigmask for us. */
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04003624
Randy Dunlap41c57892011-04-04 15:00:26 -07003625/**
3626 * sys_rt_sigsuspend - replace the signal mask for a value with the
3627 * @unewset value until a signal is received
3628 * @unewset: new signal mask value
3629 * @sigsetsize: size of sigset_t type
3630 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003631SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003632{
3633 sigset_t newset;
3634
3635 /* XXX: Don't preclude handling different sized sigset_t's. */
3636 if (sigsetsize != sizeof(sigset_t))
3637 return -EINVAL;
3638
3639 if (copy_from_user(&newset, unewset, sizeof(newset)))
3640 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003641 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003642}
Al Viroad4b65a2012-12-24 21:43:56 -05003643
3644#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	/*
	 * On big-endian the 32-bit words of a compat_sigset_t are laid
	 * out differently from a native sigset_t, so the mask must be
	 * converted explicitly before use.
	 */
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3664#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003665
Al Viro0a0e8cd2012-12-25 16:04:12 -05003666#ifdef CONFIG_OLD_SIGSUSPEND
3667SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3668{
3669 sigset_t blocked;
3670 siginitset(&blocked, mask);
3671 return sigsuspend(&blocked);
3672}
3673#endif
3674#ifdef CONFIG_OLD_SIGSUSPEND3
3675SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3676{
3677 sigset_t blocked;
3678 siginitset(&blocked, mask);
3679 return sigsuspend(&blocked);
3680}
3681#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682
/*
 * Weak default for arch_vma_name(): architectures may override this to
 * give special mappings a name; the generic fallback reports none.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3687
/* Boot-time setup for the signal subsystem: sanity-check siginfo layout
 * and create the slab cache used for queued signals. */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	/* SLAB_PANIC: the kernel cannot run without this cache. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003696
3697#ifdef CONFIG_KGDB_KDB
3698#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	/* Remembers the last target so repeated kills can override the
	 * not-RUNNING warning below. */
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/*
	 * Probe the siglock: if it is already held, delivering a signal
	 * from debugger context could deadlock.  The lock is dropped
	 * again immediately — this is only a best-effort heuristic.
	 */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	/* new_t: this is a different task than the previous kill target. */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		/*
		 * Warn and bail on the first attempt; reissuing the same
		 * kill command proceeds despite the deadlock risk.
		 */
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3735#endif /* CONFIG_KGDB_KDB */