blob: 129337ac632849e71aa96917d673902c6eb0e923 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
Christian Braunercf9f8292018-11-19 00:51:56 +010017#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/fs.h>
Christian Braunercf9f8292018-11-19 00:51:56 +010019#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/tty.h>
21#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070022#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/security.h>
24#include <linux/syscalls.h>
25#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070026#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070027#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090028#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070029#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080030#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080031#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080032#include <linux/pid_namespace.h>
33#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080034#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053035#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050036#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000037#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070038#include <linux/compiler.h>
39
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050040#define CREATE_TRACE_POINTS
41#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080042
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <asm/param.h>
44#include <asm/uaccess.h>
45#include <asm/unistd.h>
46#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010047#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040048#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50/*
51 * SLAB caches for signal bits.
52 */
53
Christoph Lametere18b8902006-12-06 20:33:20 -080054static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090056int print_fatal_signals __read_mostly;
57
Roland McGrath35de2542008-07-25 19:45:51 -070058static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070059{
Roland McGrath35de2542008-07-25 19:45:51 -070060 return t->sighand->action[sig - 1].sa.sa_handler;
61}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070062
Roland McGrath35de2542008-07-25 19:45:51 -070063static int sig_handler_ignored(void __user *handler, int sig)
64{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070065 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070066 return handler == SIG_IGN ||
67 (handler == SIG_DFL && sig_kernel_ignore(sig));
68}
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
/*
 * Would @sig be ignored by @t based on its installed handler alone
 * (blocking and ptrace are checked by the caller, sig_ignored())?
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/*
	 * A SIGNAL_UNKILLABLE task treats SIG_DFL as "ignore", unless the
	 * sender forces delivery (@force) and the signal is one only the
	 * kernel may generate (sig_kernel_only()).
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}
82
/*
 * Return true if @sig sent to @t would be discarded without delivery.
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
103
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 *
 * Returns nonzero iff any bit is set in @signal and clear in @blocked.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/*
	 * _NSIG_WORDS is a compile-time constant, so exactly one arm of
	 * this switch survives; the common sizes are hand-unrolled and
	 * the default loop handles any other word count.
	 */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
133
/* Any signal pending in @p that is not masked by blocked set @b? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Recompute TIF_SIGPENDING for @t from job control state and the
 * private and shared pending queues.  Sets the flag and returns 1 when
 * work is pending; intentionally never clears the flag (see below).
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
151
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	/* Only wake when recalc actually set TIF_SIGPENDING. */
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
161
/*
 * Recompute TIF_SIGPENDING for current.  Unlike recalc_sigpending_tsk()
 * on another task, clearing is safe here — but it is skipped while
 * freezing(), which wants the flag left set.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
168
/* Given the mask, find the first available signal that should be serviced. */

/* Synchronous (exception-style) signals, delivered before anything else. */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		/* ffz(~x) is the index of the lowest set bit of x. */
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Remaining words hold only ordinary signals; scan in order. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
220
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900221static inline void print_dropped_signal(int sig)
222{
223 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
224
225 if (!print_fatal_signals)
226 return;
227
228 if (!__ratelimit(&ratelimit_state))
229 return;
230
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700231 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900232 current->comm, current->pid, sig);
233}
234
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask bits in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	/* Reject bits outside the allowed set, and TRAPPING without a pending bit. */
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces, rather than accumulates with, the old one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
267
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		/* order the bit clear before the waitqueue check */
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
288
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* Clearing the stop also invalidates its CONSUME/DEQUEUED state. */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
316
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* sample CONSUME before task_clear_jobctl_pending() wipes it */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
358
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * The entry is charged against @t's user's RLIMIT_SIGPENDING unless
 * @override_rlimit; on failure the charge and uid reference are undone.
 * Returns the entry, or NULL on allocation failure / limit exceeded.
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	/* charge optimistically; undone below if we cannot allocate */
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* roll back the charge and the uid reference taken above */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;	/* q now owns the uid reference */
	}

	return q;
}
398
Andrew Morton514a01b2006-02-03 03:04:41 -0800399static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400{
401 if (q->flags & SIGQUEUE_PREALLOC)
402 return;
403 atomic_dec(&q->user->sigpending);
404 free_uid(q->user);
405 kmem_cache_free(sigqueue_cachep, q);
406}
407
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800408void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
410 struct sigqueue *q;
411
412 sigemptyset(&queue->signal);
413 while (!list_empty(&queue->list)) {
414 q = list_entry(queue->list.next, struct sigqueue , list);
415 list_del_init(&q->list);
416 __sigqueue_free(q);
417 }
418}
419
/*
 * Flush all pending signals for this kthread.
 *
 * Clears TIF_SIGPENDING and empties both the private and the shared
 * pending queues under @t's siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
433
/*
 * Remove every SI_TIMER entry from @pending while keeping all other
 * queued signals and their pending bits intact.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* non-timer entry: remember its signal must stay set */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/*
	 * Re-add bits for surviving queued entries: a signal both queued
	 * by a timer and by something else must remain pending.
	 */
	sigorsets(&pending->signal, &signal, &retain);
}
456
/* Flush queued itimer (SI_TIMER) signals for the current task. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
467
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700468void ignore_signals(struct task_struct *t)
469{
470 int i;
471
472 for (i = 0; i < _NSIG; ++i)
473 t->sighand->action[i].sa.sa_handler = SIG_IGN;
474
475 flush_signals(t);
476}
477
/*
 * Flush all handlers for a task.
 *
 * Resets every action to SIG_DFL with empty flags/mask; unless
 * @force_default, SIG_IGN handlers are preserved.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		/* arch-provided trampoline must not leak across exec */
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
498
/*
 * Would @sig delivered to @tsk go unhandled?  Global init is always
 * "unhandled" here; a task with a real handler installed is not;
 * for SIG_IGN/SIG_DFL the answer is left to the ptracer, if any.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
509
/*
 * Remove one queued instance of @sig from @list and copy its siginfo to
 * @info.  The pending bit is cleared only when no further instance of
 * @sig remains queued.  *@resched_timer is set when the dequeued entry
 * is a preallocated SI_TIMER entry that needs rescheduling.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				/* second instance: keep the pending bit set */
				goto still_pending;
			first = q;
		}
	}

	/* only one (or zero) instance queued: the bit can be cleared */
	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
553
/*
 * Dequeue the next deliverable signal from @pending not blocked by
 * @mask, filling in @info/@resched_timer.  Returns the signal number,
 * or 0 if nothing is deliverable.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
563
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * Private signals are tried first, then the shared pending queue.
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
639
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		/* already running elsewhere: force it to notice TIF_SIGPENDING */
		kick_process(t);
}
664
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* fast path: nothing in @mask is actually pending */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	/* clear the pending bits, then drop matching queued entries */
	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689
/*
 * The kernel's special siginfo sentinels (SEND_SIG_NOINFO/PRIV/FORCED)
 * are small constants, ordered so this single compare identifies them.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
694
695static inline bool si_fromuser(const struct siginfo *info)
696{
697 return info == SEND_SIG_NOINFO ||
698 (!is_si_special(info) && SI_FROMUSER(info));
699}
700
/*
 * Dequeue the first queued synchronous (fault-generated) signal for the
 * current task into @info, ahead of ordinary signals.  Returns the
 * signal number, or 0 if none is pending and unblocked.
 */
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 * If so the pending bit must stay set.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
742
/*
 * called with RCU read lock from check_kill_permission()
 *
 * Returns 1 when the current task's credentials permit signalling @t:
 * either a uid/euid match against @t's (saved) uids, or CAP_KILL in
 * @t's user namespace.
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
762
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 if @current may deliver @sig to @t, -EINVAL for an invalid
 * signal number, -EPERM on a credential failure, or whatever the audit
 * subsystem / LSM returns.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-internal signals bypass the permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* different session: fall through to -EPERM */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
801
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* Only kick the tracee out of an existing trap if ptracer listens. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
827
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 *
 * Called with p->sighand->siglock held (pending queues are modified).
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* During a coredump only SIGKILL may still be delivered. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* A PT_SEIZED tracee traps instead of simply resuming. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
902
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700903/*
904 * Test if P wants to take SIG. After we've checked all threads with this,
905 * it's equivalent to finding no threads not blocking SIG. Any threads not
906 * blocking SIG were ruled out because they are not running and already
907 * have pending signals. Such threads will dequeue from the shared queue
908 * as soon as they're available, so putting the signal on the shared queue
909 * will be equivalent to sending it to one such thread.
910 */
911static inline int wants_signal(int sig, struct task_struct *p)
912{
913 if (sigismember(&p->blocked, sig))
914 return 0;
915 if (p->flags & PF_EXITING)
916 return 0;
917 if (sig == SIGKILL)
918 return 1;
919 if (task_is_stopped_or_traced(p))
920 return 0;
921 return task_curr(p) || !signal_pending(p);
922}
923
/*
 * Second half of signal generation: pick a thread to take @sig (already
 * queued on the private or shared pending list) and wake it.  If the
 * signal is fatal to the whole group, short-circuit by starting a group
 * exit and SIGKILL-ing every thread now.
 * Called with p->sighand->siglock held.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so successive signals spread over
		 * the group rather than always probing from the leader.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
999
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001000static inline int legacy_queue(struct sigpending *signals, int sig)
1001{
1002 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1003}
1004
#ifdef CONFIG_USER_NS
/*
 * Translate info->si_uid from the sender's user namespace into the
 * target task's user namespace, so the receiver sees a meaningful uid.
 * No-op for kernel-generated siginfo or when both tasks share a userns.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* Without user namespaces there is nothing to translate. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1025
/*
 * Core of signal generation: decide whether @sig is delivered at all
 * (prepare_signal), queue a sigqueue entry carrying @info on either the
 * per-thread or group-shared pending list, and wake a suitable thread
 * (complete_signal).  Returns 0 on success/drop, -EAGAIN on rt-queue
 * overflow.  Caller holds t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			/* Synthesize SI_USER info as seen from @t's pid ns. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			/* Kernel-internal signal: no sending pid/uid. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* A pid from an ancestor pid ns is meaningless to @t. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1129
/*
 * Wrapper around __send_signal() that computes from_ancestor_ns: set when
 * a user-originated signal comes from a pid namespace in which @t's
 * namespace is nested (current has no pid there), so __send_signal()
 * knows to clear si_pid.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1142
/*
 * Log a fatal signal to the kernel log: signal number, register dump,
 * and (on native i386 only) a hex dump of the code at the faulting ip.
 * Used when the "print-fatal-signals" boot/sysctl option is enabled.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop at the first unreadable user byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* show_regs() wants a stable CPU context. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1166
1167static int __init setup_print_fatal_signals(char *str)
1168{
1169 get_option (&str, &print_fatal_signals);
1170
1171 return 1;
1172}
1173
1174__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
/*
 * Queue @sig/@info on the group-shared pending set of @p's thread group.
 * Caller holds p->sighand->siglock.
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/* group == 1: target the shared_pending list. */
	return send_signal(sig, info, p, 1);
}
1181
/*
 * Queue @sig/@info on the private pending set of the single thread @t.
 * Caller holds t->sighand->siglock.
 */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	/* group == 0: target @t's own pending list only. */
	return send_signal(sig, info, t, 0);
}
1187
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001188int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1189 bool group)
1190{
1191 unsigned long flags;
1192 int ret = -ESRCH;
1193
1194 if (lock_task_sighand(p, &flags)) {
1195 ret = send_signal(sig, info, p, group);
1196 unlock_task_sighand(p, &flags);
1197 }
1198
1199 return ret;
1200}
1201
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			/* Unblocking may make other pending signals deliverable. */
			recalc_sigpending_and_wake(t);
		}
	}
	/* Default-disposition forced signals may kill even init-like tasks. */
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1238
/*
 * Nuke all other threads in the group: clear their job-control state and
 * make SIGKILL pending for each live one.  Returns the number of other
 * threads seen (dead or alive).  Caller holds p->sighand->siglock.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	/* Iterates every thread in the group except @p itself. */
	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1262
/*
 * Lock @tsk->sighand->siglock with interrupts disabled, coping with the
 * sighand being freed/replaced concurrently.  Returns the locked sighand
 * (with *flags holding the saved irq state) or NULL if the task has no
 * sighand any more.  Callers pair it with unlock_task_sighand().
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		/* Lost the race: drop everything and retry. */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1304
David Howellsc69e8d92008-11-14 10:39:19 +11001305/*
1306 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001307 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1309{
David Howells694f6902010-08-04 16:59:14 +01001310 int ret;
1311
1312 rcu_read_lock();
1313 ret = check_kill_permission(sig, info, p);
1314 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001316 if (!ret && sig)
1317 ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319 return ret;
1320}
1321
1322/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001323 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001325 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001327int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328{
1329 struct task_struct *p = NULL;
1330 int retval, success;
1331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 success = 0;
1333 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001334 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 int err = group_send_sig_info(sig, info, p);
1336 success |= !err;
1337 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001338 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 return success ? 0 : retval;
1340}
1341
/*
 * Send @sig to the thread group identified by @pid.  Retries when the
 * group leader is unhashed under us (exec by a non-leader thread) so the
 * signal is not spuriously lost with -ESRCH.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1363
Randy Dunlap5aba0852011-04-04 14:59:31 -07001364int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001365{
1366 int error;
1367 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001368 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001369 rcu_read_unlock();
1370 return error;
1371}
1372
Serge Hallynd178bc32011-09-26 10:45:18 -05001373static int kill_as_cred_perm(const struct cred *cred,
1374 struct task_struct *target)
1375{
1376 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001377 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1378 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001379 return 0;
1380 return 1;
1381}
1382
/*
 * like kill_pid_info(), but doesn't use uid/euid of "current": the
 * permission check is done against the explicitly supplied @cred and
 * LSM @secid instead.  Used when signalling on behalf of another
 * identity (e.g. from interrupt-driven contexts that cached the creds).
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	/* sig == 0 is a pure permission probe, nothing to deliver. */
	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 *
 * pid > 0: that process; pid == 0: the caller's process group;
 * pid == -1: every process except init and the caller's own group;
 * pid < -1: the process group -pid.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* Skip init (vpid 1) and our own thread group. */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1466
1467/*
1468 * These are for backward compatibility with the rest of the kernel source.
1469 */
1470
Randy Dunlap5aba0852011-04-04 14:59:31 -07001471int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 /*
1474 * Make sure legacy kernel users don't send in bad values
1475 * (normal paths check this in check_kill_permission).
1476 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001477 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return -EINVAL;
1479
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001480 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481}
1482
/*
 * Translate the legacy boolean 'priv' argument into one of the special
 * siginfo marker pointers used by the send_sig*() family.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1485
/*
 * Legacy wrapper: send @sig to @p, choosing the special siginfo marker
 * from the @priv flag via __si_special().
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
1491
/*
 * Forced variant of send_sig(): delegates to force_sig_info() with the
 * SEND_SIG_PRIV marker (no caller-supplied siginfo).
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1497
1498/*
1499 * When things go south during signal handling, we
1500 * will force a SIGSEGV. And if the signal that caused
1501 * the problem was already a SIGSEGV, we'll want to
1502 * make sure we don't even try to deliver the signal..
1503 */
1504int
1505force_sigsegv(int sig, struct task_struct *p)
1506{
1507 if (sig == SIGSEGV) {
1508 unsigned long flags;
1509 spin_lock_irqsave(&p->sighand->siglock, flags);
1510 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1511 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1512 }
1513 force_sig(SIGSEGV, p);
1514 return 0;
1515}
1516
/*
 * Send @sig to every member of process group @pid.  @priv selects the
 * special siginfo marker via __si_special().  tasklist_lock is held
 * across the delivery by __kill_pgrp_info().
 */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
1528
/*
 * Send @sig to the single process identified by @pid; @priv selects the
 * special siginfo marker via __si_special().
 */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535/*
1536 * These functions support sending signals using preallocated sigqueue
1537 * structures. This is needed "because realtime applications cannot
1538 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001539 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 * we allocate the sigqueue structure from the timer_create. If this
1541 * allocation fails we are able to report the failure to the application
1542 * with an EAGAIN error.
1543 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544struct sigqueue *sigqueue_alloc(void)
1545{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001546 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001548 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001550
1551 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552}
1553
/*
 * Release a preallocated sigqueue entry.  If the entry is currently
 * queued on a pending list we only strip SIGQUEUE_PREALLOC and let the
 * dequeue path free it; otherwise we free it here.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;	/* ownership stays with the pending list */
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1578
/*
 * Queue a preallocated sigqueue entry @q on @t (shared pending set if
 * @group, else the per-thread set).
 *
 * Returns 0 when queued, 1 when the signal is ignored, and -1 when the
 * target's sighand could not be locked (task is exiting).
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	/* fresh delivery: reset the overrun counter */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	/* tracepoint fires for every outcome except the lock failure */
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1622
Joel Fernandes (Google)af1070f2019-04-30 12:21:53 -04001623static void do_notify_pidfd(struct task_struct *task)
1624{
1625 struct pid *pid;
1626
1627 pid = task_pid(task);
1628 wake_up_all(&pid->wait_pidfd);
1629}
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 * Let a parent know about the death of a child.
1633 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001634 *
Oleg Nesterov53c8f9f2011-06-22 23:08:18 +02001635 * Returns true if our parent ignored us and so we've switched to
1636 * self-reaping.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead.  */
 	BUG_ON(task_is_stopped_or_traced(tsk));

	/* only the last exiting thread (or a ptraced one) may report death */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* report our own CPU time plus that of already-reaped siblings */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/* decode exit_code: 0x80 = core dumped, low 7 bits = fatal signal */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/* sig may be 0 (suppressed above); only deliver a real signal */
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1730
Tejun Heo75b95952011-03-23 10:37:01 +01001731/**
1732 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1733 * @tsk: task reporting the state change
1734 * @for_ptracer: the notification is for ptracer
1735 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1736 *
1737 * Notify @tsk's parent that the stopped/continued state has changed. If
1738 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1739 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1740 *
1741 * CONTEXT:
1742 * Must be called with tasklist_lock at least read locked.
1743 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		/* report to the ptracer directly */
		parent = tsk->parent;
	} else {
		/* group stop: the group leader reports to its real parent */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

 	info.si_code = why;
 	switch (why) {
 	case CLD_CONTINUED:
 		info.si_status = SIGCONT;
 		break;
 	case CLD_STOPPED:
 		info.si_status = tsk->signal->group_exit_code & 0x7f;
 		break;
 	case CLD_TRAPPED:
 		info.si_status = tsk->exit_code & 0x7f;
 		break;
 	default:
 		BUG();
 	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* suppress SIGCHLD if ignored or parent set SA_NOCLDSTOP */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1800
/*
 * Decide whether the current task may actually enter a ptrace stop:
 * returns 0 if there is no tracer or if stopping would deadlock with a
 * coredump the tracer participates in, 1 otherwise.
 */
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001826 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001827 * Called with the siglock held.
1828 */
1829static int sigkill_pending(struct task_struct *tsk)
1830{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001831 return sigismember(&tsk->pending.signal, SIGKILL) ||
1832 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001833}
1834
1835/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 * This must be called with current->sighand->siglock held.
1837 *
1838 * This should be the path for all ptrace stops.
1839 * We always set current->last_siginfo while stopped here.
1840 * That makes it a way to test a stopped process for
1841 * being ptrace-stopped vs being job-control-stopped.
1842 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001843 * If we actually decide not to stop at all because the tracer
1844 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		/* sleep here until the tracer resumes us */
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
1967
/*
 * Build a zeroed siginfo describing a ptrace event (@signr/@exit_code,
 * attributed to the current task) and enter ptrace_stop() with reason
 * @why.  Caller holds current->sighand->siglock.
 */
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	/* memset (not '= {}') so padding is zeroed too before copy-out */
	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
1981
/*
 * Report a ptrace event encoded in @exit_code to the tracer as a
 * SIGTRAP-flavoured CLD_TRAPPED stop.  Low byte of @exit_code must be
 * SIGTRAP and no bits outside 0xffff may be set.
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	/* flush pending task_work before we go to sleep in the stop */
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
1992
Tejun Heo73ddff22011-06-14 11:20:14 +02001993/**
1994 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1995 * @signr: signr causing group stop if initiating
1996 *
1997 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1998 * and participate in it. If already set, participate in the existing
1999 * group stop. If participated in a group stop (and thus slept), %true is
2000 * returned with siglock released.
2001 *
2002 * If ptraced, this function doesn't handle stop itself. Instead,
2003 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2004 * untouched. The caller must ensure that INTERRUPT trap handling takes
2005 * places afterwards.
2006 *
2007 * CONTEXT:
2008 * Must be called with @current->sighand->siglock held, which is released
2009 * on %true return.
2010 *
2011 * RETURNS:
2012 * %false if group stop is already cancelled or ptrace trap is scheduled.
2013 * %true if participated in group stop.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 */
Tejun Heo73ddff22011-06-14 11:20:14 +02002015static bool do_signal_stop(int signr)
2016 __releases(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017{
2018 struct signal_struct *sig = current->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
Tejun Heoa8f072c2011-06-02 11:13:59 +02002020 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
Palmer Dabbeltb76808e2015-04-30 21:19:57 -07002021 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08002022 struct task_struct *t;
2023
Tejun Heoa8f072c2011-06-02 11:13:59 +02002024 /* signr will be recorded in task->jobctl for retries */
2025 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
Tejun Heod79fdd62011-03-23 10:37:00 +01002026
Tejun Heoa8f072c2011-06-02 11:13:59 +02002027 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
Oleg Nesterov573cf9a2008-04-30 00:52:36 -07002028 unlikely(signal_group_exit(sig)))
Tejun Heo73ddff22011-06-14 11:20:14 +02002029 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 /*
Tejun Heo408a37d2011-03-23 10:37:01 +01002031 * There is no group stop already in progress. We must
2032 * initiate one now.
2033 *
2034 * While ptraced, a task may be resumed while group stop is
2035 * still in effect and then receive a stop signal and
2036 * initiate another group stop. This deviates from the
2037 * usual behavior as two consecutive stop signals can't
Oleg Nesterov780006eac2011-04-01 20:12:16 +02002038 * cause two group stops when !ptraced. That is why we
2039 * also check !task_is_stopped(t) below.
Tejun Heo408a37d2011-03-23 10:37:01 +01002040 *
2041 * The condition can be distinguished by testing whether
2042 * SIGNAL_STOP_STOPPED is already set. Don't generate
2043 * group_exit_code in such case.
2044 *
2045 * This is not necessary for SIGNAL_STOP_CONTINUED because
2046 * an intervening stop signal is required to cause two
2047 * continued events regardless of ptrace.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 */
Tejun Heo408a37d2011-03-23 10:37:01 +01002049 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2050 sig->group_exit_code = signr;
Oleg Nesterova122b342006-03-28 16:11:22 -08002051
Tejun Heo7dd3db52011-06-02 11:14:00 +02002052 sig->group_stop_count = 0;
2053
2054 if (task_set_jobctl_pending(current, signr | gstop))
2055 sig->group_stop_count++;
2056
Oleg Nesterov8d38f202014-01-23 15:55:56 -08002057 t = current;
2058 while_each_thread(current, t) {
Oleg Nesterova122b342006-03-28 16:11:22 -08002059 /*
2060 * Setting state to TASK_STOPPED for a group
2061 * stop is always done with the siglock held,
2062 * so this check has no races.
2063 */
Tejun Heo7dd3db52011-06-02 11:14:00 +02002064 if (!task_is_stopped(t) &&
2065 task_set_jobctl_pending(t, signr | gstop)) {
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002066 sig->group_stop_count++;
Tejun Heofb1d9102011-06-14 11:20:17 +02002067 if (likely(!(t->ptrace & PT_SEIZED)))
2068 signal_wake_up(t, 0);
2069 else
2070 ptrace_trap_notify(t);
Oleg Nesterova122b342006-03-28 16:11:22 -08002071 }
Tejun Heod79fdd62011-03-23 10:37:00 +01002072 }
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002073 }
Tejun Heo73ddff22011-06-14 11:20:14 +02002074
Tejun Heod21142e2011-06-17 16:50:34 +02002075 if (likely(!current->ptrace)) {
Tejun Heo5224fa32011-03-23 10:37:00 +01002076 int notify = 0;
2077
2078 /*
2079 * If there are no other threads in the group, or if there
2080 * is a group stop in progress and we are the last to stop,
2081 * report to the parent.
2082 */
2083 if (task_participate_group_stop(current))
2084 notify = CLD_STOPPED;
2085
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002086 __set_current_state(TASK_STOPPED);
Tejun Heo5224fa32011-03-23 10:37:00 +01002087 spin_unlock_irq(&current->sighand->siglock);
2088
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002089 /*
2090 * Notify the parent of the group stop completion. Because
2091 * we're not holding either the siglock or tasklist_lock
2092 * here, ptracer may attach inbetween; however, this is for
2093 * group stop and should always be delivered to the real
2094 * parent of the group leader. The new ptracer will get
2095 * its notification when this task transitions into
2096 * TASK_TRACED.
2097 */
Tejun Heo5224fa32011-03-23 10:37:00 +01002098 if (notify) {
2099 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002100 do_notify_parent_cldstop(current, false, notify);
Tejun Heo5224fa32011-03-23 10:37:00 +01002101 read_unlock(&tasklist_lock);
2102 }
2103
2104 /* Now we don't run again until woken by SIGCONT or SIGKILL */
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002105 freezable_schedule();
Tejun Heo73ddff22011-06-14 11:20:14 +02002106 return true;
Tejun Heod79fdd62011-03-23 10:37:00 +01002107 } else {
Tejun Heo73ddff22011-06-14 11:20:14 +02002108 /*
2109 * While ptraced, group stop is handled by STOP trap.
2110 * Schedule it and let the caller deal with it.
2111 */
2112 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2113 return false;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002114 }
Tejun Heo73ddff22011-06-14 11:20:14 +02002115}
Tejun Heod79fdd62011-03-23 10:37:00 +01002116
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	/* stop signal number encoded in the low bits of ->jobctl */
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop in progress or already completed: this is
		 * an explicit SEIZE/INTERRUPT trap, reported as SIGTRAP.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		/* legacy (non-SEIZED) ptrace: plain stop trap, no siginfo */
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2150
/*
 * ptrace_signal - trap to the tracer with a freshly dequeued signal and
 * let it cancel, keep, or replace it.
 * @signr: signal number just dequeued
 * @info: siginfo of the dequeued signal; rewritten below if the tracer
 *        substitutes a different signal without PTRACE_SETSIGINFO
 *
 * Returns the signal number the debugger wants delivered, or 0 if the
 * signal was cancelled or is now blocked (in which case it is requeued).
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		/* synthesize SI_USER info attributed to the tracer */
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		/* RCU protects the ->parent dereferences below */
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2198
/**
 * get_signal - main loop for picking up the next signal to deliver
 * @ksig: out parameter filled with the signal and its handler state
 *
 * Handles pending job-control work first (stop-state notifications,
 * group stops, jobctl traps), then dequeues signals until one requires
 * a user-mode handler (returns nonzero with @ksig populated) or none
 * remain (returns 0).  Fatal signals do not return here: do_group_exit()
 * is invoked, possibly after a core dump.
 *
 * CONTEXT:
 * May sleep.  Acquires and releases @current->sighand->siglock,
 * possibly several times.
 */
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	/* flush any queued task_work before delivering signals */
	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		/* participate in a pending group stop before dequeueing */
		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* let the tracer inspect everything except SIGKILL */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			/* SA_ONESHOT/SA_RESETHAND: reset to default action */
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2403
/**
 * signal_delivered - account for a successfully delivered signal
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	/* block sa_mask for the duration of the handler ... */
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	/* ... and the signal itself, unless SA_NODEFER was requested */
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
2430
Al Viro2ce5da12012-11-07 15:11:25 -05002431void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2432{
2433 if (failed)
2434 force_sigsegv(ksig->sig, current);
2435 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002436 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002437}
2438
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 *
 * @tsk:   thread that is no longer willing to take the signals
 * @which: set of signals to hand off to the remaining threads
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	/* only shared-pending signals in @which need a new target */
	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		/* exiting threads won't pick up shared signals either */
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		/* wake it only if it isn't already due to process signals */
		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* stop early once every signal has found a taker */
		if (sigisemptyset(&retarget))
			break;
	}
}
2470
/**
 * exit_signals - disconnect an exiting task from signal delivery
 * @tsk: task being torn down (the code below reads %current state via
 *       @tsk, so this is expected to be current — NOTE(review): confirm
 *       against callers)
 *
 * Sets PF_EXITING so group-wide signals no longer target @tsk, retargets
 * still-pending shared signals to the surviving threads, and, if this
 * task's exit completes a pending group stop, notifies the parent.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	/* last thread, or group exit already underway: nothing to hand off */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* hand any shared signals we could have taken to other threads */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2520
/* Symbols exported to loadable modules. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
2529/*
2530 * System call entry points.
2531 */
2532
Randy Dunlap41c57892011-04-04 15:00:26 -07002533/**
2534 * sys_restart_syscall - restart a system call
2535 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002536SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002538 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 return restart->fn(restart);
2540}
2541
/*
 * Restart callback for syscalls that must not be restarted: always
 * reports -EINTR.  Matches the restart_block->fn signature invoked by
 * sys_restart_syscall() above; @param is intentionally unused.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2546
/*
 * Install @newset as @tsk's blocked mask.  Shared-pending signals that
 * become blocked for this thread are retargeted to siblings first so
 * they are not stranded.  Caller must hold @tsk->sighand->siglock
 * (see __set_current_blocked() below).
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2558
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask; modified in place — SIGKILL and SIGSTOP are
 *          silently removed before the mask is installed
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	/* the unblockable signals can never be masked */
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2571
/*
 * Like set_current_blocked(), but installs @newset verbatim: no
 * filtering of SIGKILL/SIGSTOP is performed here.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
2588/*
2589 * This is also useful for kernel threads that want to temporarily
2590 * (or permanently) block certain signals.
2591 *
2592 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2593 * interface happily blocks "unblockable" signals like SIGKILL
2594 * and friends.
2595 */
2596int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2597{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002598 struct task_struct *tsk = current;
2599 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002601 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002602 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002603 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002604
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 switch (how) {
2606 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002607 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 break;
2609 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002610 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 break;
2612 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002613 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 break;
2615 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002616 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002618
Al Viro77097ae2012-04-27 13:58:59 -04002619 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002620 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621}
2622
Randy Dunlap41c57892011-04-04 15:00:26 -07002623/**
2624 * sys_rt_sigprocmask - change the list of currently blocked signals
2625 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07002626 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002627 * @oset: previous value of signal mask if non-null
2628 * @sigsetsize: size of sigset_t type
2629 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002630SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002631 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002634 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
2636 /* XXX: Don't preclude handling different sized sigset_t's. */
2637 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002638 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002640 old_set = current->blocked;
2641
2642 if (nset) {
2643 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2644 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2646
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002647 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002649 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002651
2652 if (oset) {
2653 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2654 return -EFAULT;
2655 }
2656
2657 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658}
2659
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigprocmask.  On big-endian kernels the
 * compat sigset must be converted explicitly in both directions; on
 * little-endian the request is forwarded to the native syscall with
 * the buffers passed through unchanged.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* userspace may never block SIGKILL or SIGSTOP */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
Al Viro322a56c2012-12-25 13:32:58 -05002698
/*
 * Collect the union of this thread's private pending signals and the
 * group's shared pending signals into *set, then reduce it to the
 * signals that are currently blocked.
 *
 * @set:        out buffer, at least sizeof(sigset_t) bytes
 * @sigsetsize: caller-supplied size; must not exceed sizeof(sigset_t)
 *
 * Returns 0 on success or -EINVAL for an oversized @sigsetsize.
 */
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	/* siglock guards both pending queues */
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713
Randy Dunlap41c57892011-04-04 15:00:26 -07002714/**
2715 * sys_rt_sigpending - examine a pending signal that has been raised
2716 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002717 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002718 * @sigsetsize: size of sigset_t type or larger
2719 */
Al Virofe9c1db2012-12-25 14:31:38 -05002720SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721{
Al Virofe9c1db2012-12-25 14:31:38 -05002722 sigset_t set;
2723 int err = do_sigpending(&set, sigsetsize);
2724 if (!err && copy_to_user(uset, &set, sigsetsize))
2725 err = -EFAULT;
2726 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727}
2728
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigpending.  Big-endian kernels convert the
 * native sigset to the compat layout before copying out; little-endian
 * kernels forward directly to the native syscall.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002749
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

/*
 * Copy a siginfo_t to userspace, decoding si_code to copy only the three
 * generic ints plus the relevant union member so that kernel padding is
 * never written out.  Returns 0 on success, -EFAULT on a faulting store.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	/* Negative si_code: no union decoding needed, copy verbatim. */
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		/* MPX bound violation: include the violated bounds. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		/* Protection-key fault: include the key. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2841
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 *
 *  Returns the dequeued signal number on success, -EINTR if the sleep was
 *  interrupted, -EAGAIN if the timeout expired, or -EINVAL for a bad @ts.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	/* A zero timeout means pure poll: never sleep. */
	if (!sig && timeout.tv64) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/* Restore the original blocked set, then try one last dequeue. */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
2897
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	/* NULL timeout waits indefinitely; ts validity is checked inside. */
	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	/* ret > 0 is the dequeued signal number; return its siginfo too. */
	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
2936
/*
 * Fill @info as a classic kill() from the current task: SI_USER with the
 * sender's tgid and uid as seen in the current pid/user namespaces.
 */
static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
{
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}
2945
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	/* Build a SI_USER siginfo identifying the current task as sender. */
	prepare_kill_siginfo(sig, &info);

	/* Interpretation of @pid (process/group/broadcast) is delegated. */
	return kill_something_info(sig, &info, pid);
}
2959
Christian Braunercf9f8292018-11-19 00:51:56 +01002960/*
2961 * Verify that the signaler and signalee either are in the same pid namespace
2962 * or that the signaler's pid namespace is an ancestor of the signalee's pid
2963 * namespace.
2964 */
2965static bool access_pidfd_pidns(struct pid *pid)
2966{
2967 struct pid_namespace *active = task_active_pid_ns(current);
2968 struct pid_namespace *p = ns_of_pid(pid);
2969
2970 for (;;) {
2971 if (!p)
2972 return false;
2973 if (p == active)
2974 break;
2975 p = p->parent;
2976 }
2977
2978 return true;
2979}
2980
Christian Braunerb3ae5982019-04-17 22:50:25 +02002981static struct pid *pidfd_to_pid(const struct file *file)
2982{
2983 if (file->f_op == &pidfd_fops)
2984 return file->private_data;
2985
2986 return tgid_pidfd_to_pid(file);
2987}
2988
Christian Braunercf9f8292018-11-19 00:51:56 +01002989static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
2990{
2991#ifdef CONFIG_COMPAT
2992 /*
2993 * Avoid hooking up compat syscalls and instead handle necessary
2994 * conversions here. Note, this is a stop-gap measure and should not be
2995 * considered a generic solution.
2996 */
2997 if (in_compat_syscall())
2998 return copy_siginfo_from_user32(
2999 kinfo, (struct compat_siginfo __user *)info);
3000#endif
3001 return copy_from_user(kinfo, info, sizeof(siginfo_t));
3002}
3003
/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>. It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	/* The sender's pid namespace must be an ancestor of the target's. */
	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		/* The siginfo must name the same signal being sent. */
		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		/* No siginfo supplied: behave like a plain kill() from us. */
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
Christian Braunercf9f8292018-11-19 00:51:56 +01003073
/*
 * Deliver @sig with @info to the single task with pid @pid.  When
 * @tgid > 0 the task's thread group id must match it, otherwise the
 * check is skipped.  A zero @sig only probes permissions/existence.
 * Returns 0 on success or a negative errno (-ESRCH if no such task).
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU protects the pid -> task lookup; no tasklist_lock needed. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
3103
/*
 * Build an SI_TKILL siginfo identifying the current task as sender and
 * deliver @sig to the single thread @pid (checked against @tgid if > 0).
 */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};	/* zero-filled: no uninitialized fields */

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
3116
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117/**
3118 * sys_tgkill - send signal to one specific thread
3119 * @tgid: the thread group ID of the thread
3120 * @pid: the PID of the thread
3121 * @sig: signal to be sent
3122 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003123 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 * exists but it's not belonging to the target process anymore. This
3125 * method solves the problem of threads exiting and PIDs getting reused.
3126 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003127SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 /* This is only valid for single tasks */
3130 if (pid <= 0 || tgid <= 0)
3131 return -EINVAL;
3132
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003133 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134}
3135
Randy Dunlap41c57892011-04-04 15:00:26 -07003136/**
3137 * sys_tkill - send signal to one specific task
3138 * @pid: the PID of the task
3139 * @sig: signal to be sent
3140 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3142 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003143SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 /* This is only valid for single tasks */
3146 if (pid <= 0)
3147 return -EINVAL;
3148
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003149 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150}
3151
/*
 * Common helper for rt_sigqueueinfo(): reject attempts to forge
 * kernel-style (si_code >= 0) or SI_TKILL siginfo aimed at another
 * process, then deliver to @pid.
 */
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* si_signo must agree with the signal actually being sent. */
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
3166
/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	/* Forgery checks on the copied siginfo happen in do_rt_sigqueueinfo(). */
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3181
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-initialized so any field the compat copy leaves unset is 0. */
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
Al Viro75907d42012-12-25 15:19:12 -05003195
/*
 * tgkill() analogue of do_rt_sigqueueinfo(): deliver caller-supplied
 * siginfo to one specific thread, verifying its thread group id.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* si_signo must agree with the signal actually being sent. */
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3213
/* Thread-directed variant of rt_sigqueueinfo(); see do_rt_tgsigqueueinfo(). */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3224
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-initialized so any field the compat copy leaves unset is 0. */
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3239
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 *
 * Installs @action as the handler for @sig on the current task; when the
 * signal becomes ignored, already-queued instances are discarded.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		/* Drop pending instances of the now-ignored signal. */
		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003260
/*
 * Arch hook called from do_sigaction(); the default does nothing and
 * architectures may override it (__weak) to adjust compat sigaction state.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3265
/*
 * Install a new action for @sig on the current process (and return the
 * old one through @oact if non-NULL), implementing sigaction() semantics.
 * Returns 0 or -EINVAL.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* Signals reserved to the kernel (sig_kernel_only) may be read
	 * (act == NULL) but never changed. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL/SIGSTOP can never be masked by a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush from the shared queue and every thread's queue. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3310
/*
 * Core of sigaltstack(): optionally install a new alternate signal stack
 * from @uss and/or report the old one through @uoss.  @sp is the caller's
 * current stack pointer (used to detect running on the altstack) and
 * @min_ss_size the smallest acceptable stack size.
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
		size_t min_ss_size)
{
	stack_t oss;
	int error;

	/* Snapshot the current settings before possibly changing them. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Changing the altstack while executing on it is forbidden. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		/* Mode is ss_flags with the SS_AUTODISARM-style bits removed. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			/* Reject stacks smaller than the required minimum. */
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		/* Report the settings captured before any change above. */
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
/* Native sigaltstack() entry: current stack pointer, MINSIGSTKSZ minimum. */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
			      MINSIGSTKSZ);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
/*
 * Restore an altstack configuration supplied by userspace, ignoring all
 * errors except a faulting access.
 */
int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
				 MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return err == -EFAULT ? err : 0;
}
3387
/*
 * Write the current task's altstack settings into the user-supplied
 * stack_t at @uss; with SS_AUTODISARM set, the altstack is then reset.
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	/* SS_AUTODISARM: disarm the altstack once it has been saved. */
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3400
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		/* Widen the 32-bit stack_t into the native layout. */
		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	/*
	 * Switch to KERNEL_DS so do_sigaltstack()'s __user accessors accept
	 * the kernel-resident uss/uoss copies; restored right after the call.
	 */
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		/* Narrow the old settings back to the compat layout. */
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3436
/* Compat counterpart of restore_altstack(): only -EFAULT is propagated. */
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
Al Viroc40702c2012-11-20 14:24:26 -05003443
/*
 * Compat counterpart of __save_altstack(): store the current altstack
 * into a compat_stack_t and honour SS_AUTODISARM.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	/* SS_AUTODISARM: disarm the altstack once it has been saved. */
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	/* Legacy entry: reuse rt_sigpending with the old, smaller set size. */
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
3472
3473#ifdef __ARCH_WANT_SYS_SIGPROCMASK
Randy Dunlap41c57892011-04-04 15:00:26 -07003474/**
3475 * sys_sigprocmask - examine and change blocked signals
3476 * @how: whether to add, remove, or set signals
Oleg Nesterovb013c392011-04-28 11:36:20 +02003477 * @nset: signals to add or remove (if non-null)
Randy Dunlap41c57892011-04-04 15:00:26 -07003478 * @oset: previous value of signal mask if non-null
3479 *
Randy Dunlap5aba0852011-04-04 14:59:31 -07003480 * Some platforms have their own version with special arguments;
3481 * others support only sys_rt_sigprocmask.
3482 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483
Oleg Nesterovb013c392011-04-28 11:36:20 +02003484SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
Heiko Carstensb290ebe2009-01-14 14:14:06 +01003485 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 old_sigset_t old_set, new_set;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003488 sigset_t new_blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489
Oleg Nesterovb013c392011-04-28 11:36:20 +02003490 old_set = current->blocked.sig[0];
3491
3492 if (nset) {
3493 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3494 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003496 new_blocked = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 switch (how) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 case SIG_BLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003500 sigaddsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 break;
3502 case SIG_UNBLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003503 sigdelsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 break;
3505 case SIG_SETMASK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003506 new_blocked.sig[0] = new_set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 break;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003508 default:
3509 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 }
3511
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01003512 set_current_blocked(&new_blocked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 }
Oleg Nesterovb013c392011-04-28 11:36:20 +02003514
3515 if (oset) {
3516 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3517 return -EFAULT;
3518 }
3519
3520 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521}
3522#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3523
Al Viroeaca6ea2012-11-25 23:12:10 -05003524#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003525/**
3526 * sys_rt_sigaction - alter an action taken by a process
3527 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003528 * @act: new sigaction
3529 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003530 * @sigsetsize: size of sigset_t type
3531 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003532SYSCALL_DEFINE4(rt_sigaction, int, sig,
3533 const struct sigaction __user *, act,
3534 struct sigaction __user *, oact,
3535 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536{
3537 struct k_sigaction new_sa, old_sa;
3538 int ret = -EINVAL;
3539
3540 /* XXX: Don't preclude handling different sized sigset_t's. */
3541 if (sigsetsize != sizeof(sigset_t))
3542 goto out;
3543
3544 if (act) {
3545 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3546 return -EFAULT;
3547 }
3548
3549 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3550
3551 if (!ret && oact) {
3552 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3553 return -EFAULT;
3554 }
3555out:
3556 return ret;
3557}
Al Viro08d32fe2012-12-25 18:38:15 -05003558#ifdef CONFIG_COMPAT
Al Viro08d32fe2012-12-25 18:38:15 -05003559COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3560 const struct compat_sigaction __user *, act,
3561 struct compat_sigaction __user *, oact,
3562 compat_size_t, sigsetsize)
3563{
3564 struct k_sigaction new_ka, old_ka;
3565 compat_sigset_t mask;
3566#ifdef __ARCH_HAS_SA_RESTORER
3567 compat_uptr_t restorer;
3568#endif
3569 int ret;
3570
3571 /* XXX: Don't preclude handling different sized sigset_t's. */
3572 if (sigsetsize != sizeof(compat_sigset_t))
3573 return -EINVAL;
3574
3575 if (act) {
3576 compat_uptr_t handler;
3577 ret = get_user(handler, &act->sa_handler);
3578 new_ka.sa.sa_handler = compat_ptr(handler);
3579#ifdef __ARCH_HAS_SA_RESTORER
3580 ret |= get_user(restorer, &act->sa_restorer);
3581 new_ka.sa.sa_restorer = compat_ptr(restorer);
3582#endif
3583 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
Mathieu Desnoyers3ddc5b42013-09-11 14:23:18 -07003584 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
Al Viro08d32fe2012-12-25 18:38:15 -05003585 if (ret)
3586 return -EFAULT;
3587 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3588 }
3589
3590 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3591 if (!ret && oact) {
3592 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3593 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3594 &oact->sa_handler);
3595 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
Mathieu Desnoyers3ddc5b42013-09-11 14:23:18 -07003596 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
Al Viro08d32fe2012-12-25 18:38:15 -05003597#ifdef __ARCH_HAS_SA_RESTORER
3598 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3599 &oact->sa_restorer);
3600#endif
3601 }
3602 return ret;
3603}
3604#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003605#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606
Al Viro495dfbf2012-12-25 19:09:45 -05003607#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction entry point using the old_sigaction layout (single-
 * word sa_mask).  Translates to/from k_sigaction around do_sigaction().
 * Returns 0 on success, -EFAULT on bad user memory, or the
 * do_sigaction() error.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/*
		 * access_ok() up front allows the cheaper __get_user()
		 * variants; the || chain stops at the first failure.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Old ABI carries only the first word of the mask. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3642#endif
3643#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * 32-bit-compat version of the legacy sigaction syscall: same flow as
 * the native old-sigaction path, with compat_uptr_t handler/restorer
 * pointers converted via compat_ptr()/ptr_to_compat().
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* || chain: stop at the first failed user access. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* Old ABI carries only the first word of the mask. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3683#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684
Fabian Frederickf6187762014-06-04 16:11:12 -07003685#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686
3687/*
3688 * For backwards compatibility. Functionality superseded by sigprocmask.
3689 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003690SYSCALL_DEFINE0(sgetmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691{
3692 /* SMP safe */
3693 return current->blocked.sig[0];
3694}
3695
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003696SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003698 int old = current->blocked.sig[0];
3699 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003701 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003702 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703
3704 return old;
3705}
Fabian Frederickf6187762014-06-04 16:11:12 -07003706#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
3708#ifdef __ARCH_WANT_SYS_SIGNAL
3709/*
3710 * For backwards compatibility. Functionality superseded by sigaction.
3711 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003712SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713{
3714 struct k_sigaction new_sa, old_sa;
3715 int ret;
3716
3717 new_sa.sa.sa_handler = handler;
3718 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003719 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720
3721 ret = do_sigaction(sig, &new_sa, &old_sa);
3722
3723 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3724}
3725#endif /* __ARCH_WANT_SYS_SIGNAL */
3726
3727#ifdef __ARCH_WANT_SYS_PAUSE
3728
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003729SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003731 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003732 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003733 schedule();
3734 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 return -ERESTARTNOHAND;
3736}
3737
3738#endif
3739
Richard Weinberger9d8a7652015-11-20 15:57:21 -08003740static int sigsuspend(sigset_t *set)
Al Viro68f3f162012-05-21 21:42:32 -04003741{
Al Viro68f3f162012-05-21 21:42:32 -04003742 current->saved_sigmask = current->blocked;
3743 set_current_blocked(set);
3744
Sasha Levin823dd322016-02-05 15:36:05 -08003745 while (!signal_pending(current)) {
3746 __set_current_state(TASK_INTERRUPTIBLE);
3747 schedule();
3748 }
Al Viro68f3f162012-05-21 21:42:32 -04003749 set_restore_sigmask();
3750 return -ERESTARTNOHAND;
3751}
Al Viro68f3f162012-05-21 21:42:32 -04003752
Randy Dunlap41c57892011-04-04 15:00:26 -07003753/**
3754 * sys_rt_sigsuspend - replace the signal mask for a value with the
3755 * @unewset value until a signal is received
3756 * @unewset: new signal mask value
3757 * @sigsetsize: size of sigset_t type
3758 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003759SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003760{
3761 sigset_t newset;
3762
3763 /* XXX: Don't preclude handling different sized sigset_t's. */
3764 if (sigsetsize != sizeof(sigset_t))
3765 return -EINVAL;
3766
3767 if (copy_from_user(&newset, unewset, sizeof(newset)))
3768 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003769 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003770}
Al Viroad4b65a2012-12-24 21:43:56 -05003771
3772#ifdef CONFIG_COMPAT
/*
 * 32-bit-compat rt_sigsuspend.  On big-endian the compat sigset word
 * layout differs from the native one, so it must be converted; on
 * little-endian the bit layout is identical and the native syscall can
 * be reused directly.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3792#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003793
Al Viro0a0e8cd2012-12-25 16:04:12 -05003794#ifdef CONFIG_OLD_SIGSUSPEND
/* Legacy one-argument sigsuspend: mask is passed by value, one word. */
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
3801#endif
3802#ifdef CONFIG_OLD_SIGSUSPEND3
/*
 * Three-argument legacy sigsuspend used by some architectures; the
 * first two arguments are ABI padding and ignored.
 */
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
3809#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -07003811__weak const char *arch_vma_name(struct vm_area_struct *vma)
David Howellsf269fdd2006-09-27 01:50:23 -07003812{
3813 return NULL;
3814}
3815
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816void __init signals_init(void)
3817{
Helge Deller41b27152016-03-22 14:27:54 -07003818 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3819 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3820 != offsetof(struct siginfo, _sifields._pad));
3821
Christoph Lameter0a31bd52007-05-06 14:49:57 -07003822 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003824
3825#ifdef CONFIG_KGDB_KDB
3826#include <linux/kdb.h>
3827/*
3828 * kdb_send_sig_info - Allows kdb to send signals without exposing
3829 * signal internals. This function checks if the required locks are
3830 * available before calling the main signal code, to avoid kdb
3831 * deadlocks.
3832 */
3833void
3834kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3835{
3836 static struct task_struct *kdb_prev_t;
3837 int sig, new_t;
3838 if (!spin_trylock(&t->sighand->siglock)) {
3839 kdb_printf("Can't do kill command now.\n"
3840 "The sigmask lock is held somewhere else in "
3841 "kernel, try again later\n");
3842 return;
3843 }
3844 spin_unlock(&t->sighand->siglock);
3845 new_t = kdb_prev_t != t;
3846 kdb_prev_t = t;
3847 if (t->state != TASK_RUNNING && new_t) {
3848 kdb_printf("Process is not RUNNING, sending a signal from "
3849 "kdb risks deadlock\n"
3850 "on the run queue locks. "
3851 "The signal has _not_ been sent.\n"
3852 "Reissue the kill command if you want to risk "
3853 "the deadlock.\n");
3854 return;
3855 }
3856 sig = info->si_signo;
3857 if (send_sig_info(sig, info, t))
3858 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3859 sig, t->pid);
3860 else
3861 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3862}
3863#endif /* CONFIG_KGDB_KDB */