/*
 *  fs/signalfd.c
 *
 *  Copyright (C) 2003 Linus Torvalds
 *
 *  Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
 *      Changed ->read() to return a siginfo structure instead of signal number.
 *      Fixed locking in ->poll().
 *      Added sighand-detach notification.
 *      Added fd re-use in sys_signalfd() syscall.
 *      Now using anonymous inode source.
 *      Thanks to Oleg Nesterov for useful code review and suggestions.
 *      More comments and suggestions from Arnd Bergmann.
 *  Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
 *      Retrieve multiple signals with one read() call.
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>

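/*
 * Per-fd context: "lnk" links the context into the owning task's
 * sighand->signalfd_list, "wqh" is where readers and pollers sleep,
 * "sigmask" is the (inverted) set of signals of interest, and "tsk" is the
 * task the fd is attached to (reset to NULL once the sighand is detached).
 */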
struct signalfd_ctx {
        struct list_head lnk;
        wait_queue_head_t wqh;
        sigset_t sigmask;
        struct task_struct *tsk;
};

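/*
 * Result of a successful signalfd_lock(): the task whose sighand lock we
 * hold, plus the saved IRQ flags needed to drop it again in
 * signalfd_unlock().
 */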
struct signalfd_lockctx {
        struct task_struct *tsk;
        unsigned long flags;
};

/*
 * Tries to acquire the sighand lock. We do not increment the sighand
 * use count, and we do not even pin the task struct, so we need to
 * do it inside an RCU read lock, and we must be prepared for the
 * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand
 * being detached. We return 0 if the sighand has been detached, or
 * 1 if we were able to pin the sighand lock.
 */
static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk)
{
        struct sighand_struct *sighand = NULL;

        rcu_read_lock();
        lk->tsk = rcu_dereference(ctx->tsk);
        if (likely(lk->tsk != NULL))
                sighand = lock_task_sighand(lk->tsk, &lk->flags);
        rcu_read_unlock();

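        /*
         * signalfd_deliver() clears ctx->tsk under the sighand lock when the
         * task detaches its sighand. If that happened between the RCU
         * dereference above and taking the lock, drop the lock again and
         * report the sighand as detached.
         */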
        if (sighand && !ctx->tsk) {
                unlock_task_sighand(lk->tsk, &lk->flags);
                sighand = NULL;
        }

        return sighand != NULL;
}

static void signalfd_unlock(struct signalfd_lockctx *lk)
{
        unlock_task_sighand(lk->tsk, &lk->flags);
}

/*
 * This must be called with the sighand lock held.
 */
void signalfd_deliver(struct task_struct *tsk, int sig)
{
        struct sighand_struct *sighand = tsk->sighand;
        struct signalfd_ctx *ctx, *tmp;

        BUG_ON(!sig);
        list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) {
                /*
                 * We use a negative signal value as a way to broadcast that the
                 * sighand has been orphaned, so that we can notify all the
                 * listeners about this. Remember the ctx->sigmask is inverted,
                 * so if the user is interested in a signal, the corresponding
                 * bit will be zero.
                 */
                if (sig < 0) {
                        if (ctx->tsk == tsk) {
                                ctx->tsk = NULL;
                                list_del_init(&ctx->lnk);
                                wake_up(&ctx->wqh);
                        }
                } else {
                        if (!sigismember(&ctx->sigmask, sig))
                                wake_up(&ctx->wqh);
                }
        }
}

static void signalfd_cleanup(struct signalfd_ctx *ctx)
{
        struct signalfd_lockctx lk;

        /*
         * This is tricky. If the sighand is gone, we do not need to remove
         * the context from the list; the list itself won't be there anymore.
         */
        if (signalfd_lock(ctx, &lk)) {
                list_del(&ctx->lnk);
                signalfd_unlock(&lk);
        }
        kfree(ctx);
}

static int signalfd_release(struct inode *inode, struct file *file)
{
        signalfd_cleanup(file->private_data);
        return 0;
}

static unsigned int signalfd_poll(struct file *file, poll_table *wait)
{
        struct signalfd_ctx *ctx = file->private_data;
        unsigned int events = 0;
        struct signalfd_lockctx lk;

        poll_wait(file, &ctx->wqh, wait);

        /*
         * Let the caller get a POLLIN in this case, ala socket recv() when
         * the peer disconnects.
         */
        if (signalfd_lock(ctx, &lk)) {
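                /*
                 * Check the task-private pending queue only when the caller
                 * is the task this signalfd is attached to; the shared
                 * (process-wide) pending queue is checked in any case.
                 */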
                if ((lk.tsk == current &&
                     next_signal(&lk.tsk->pending, &ctx->sigmask) > 0) ||
                    next_signal(&lk.tsk->signal->shared_pending,
                                &ctx->sigmask) > 0)
                        events |= POLLIN;
                signalfd_unlock(&lk);
        } else
                events |= POLLIN;

        return events;
}

/*
 * Copied from copy_siginfo_to_user() in kernel/signal.c
 */
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                             siginfo_t const *kinfo)
{
        long err;

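        /*
         * struct signalfd_siginfo is part of the userspace ABI and is padded
         * to a fixed 128 bytes; catch accidental layout changes at compile
         * time.
         */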
        BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);

        /*
         * Unused members should be zero ...
         */
        err = __clear_user(uinfo, sizeof(*uinfo));

        /*
         * If you change the siginfo_t structure, please be sure
         * this code is fixed accordingly.
         */
        err |= __put_user(kinfo->si_signo, &uinfo->signo);
        err |= __put_user(kinfo->si_errno, &uinfo->err);
        err |= __put_user((short)kinfo->si_code, &uinfo->code);
        switch (kinfo->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(kinfo->si_pid, &uinfo->pid);
                err |= __put_user(kinfo->si_uid, &uinfo->uid);
                break;
        case __SI_TIMER:
                err |= __put_user(kinfo->si_tid, &uinfo->tid);
                err |= __put_user(kinfo->si_overrun, &uinfo->overrun);
                err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr);
                break;
        case __SI_POLL:
                err |= __put_user(kinfo->si_band, &uinfo->band);
                err |= __put_user(kinfo->si_fd, &uinfo->fd);
                break;
        case __SI_FAULT:
                err |= __put_user((long)kinfo->si_addr, &uinfo->addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(kinfo->si_trapno, &uinfo->trapno);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(kinfo->si_pid, &uinfo->pid);
                err |= __put_user(kinfo->si_uid, &uinfo->uid);
                err |= __put_user(kinfo->si_status, &uinfo->status);
                err |= __put_user(kinfo->si_utime, &uinfo->utime);
                err |= __put_user(kinfo->si_stime, &uinfo->stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is. */
                err |= __put_user(kinfo->si_pid, &uinfo->pid);
                err |= __put_user(kinfo->si_uid, &uinfo->uid);
                err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr);
                break;
        default: /* This is just in case for now ... */
                err |= __put_user(kinfo->si_pid, &uinfo->pid);
                err |= __put_user(kinfo->si_uid, &uinfo->uid);
                break;
        }

        return err ? -EFAULT : sizeof(*uinfo);
}

static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
                                int nonblock)
{
        ssize_t ret;
        struct signalfd_lockctx lk;
        DECLARE_WAITQUEUE(wait, current);

        if (!signalfd_lock(ctx, &lk))
                return 0;

        ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
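        /*
         * ret == 0 means nothing was pending: a non-blocking read turns that
         * into -EAGAIN and returns through the default label, while a
         * blocking read breaks out of the switch and enters the wait loop
         * below. A dequeued signal (ret > 0) is returned right away.
         */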
        switch (ret) {
        case 0:
                if (!nonblock)
                        break;
                ret = -EAGAIN;
        default:
                signalfd_unlock(&lk);
                return ret;
        }

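        /*
         * Sleep on ctx->wqh until signalfd_deliver() wakes us up. The sighand
         * lock is dropped before every schedule() and re-acquired afterwards;
         * if it cannot be re-acquired, the sighand has been detached and we
         * return 0.
         */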
        add_wait_queue(&ctx->wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
                signalfd_unlock(&lk);
                if (ret != 0)
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
                ret = signalfd_lock(ctx, &lk);
                if (unlikely(!ret)) {
                        /*
                         * Let the caller read zero bytes, ala socket
                         * recv() when the peer disconnects. This test
                         * must be done before doing a dequeue_signal(),
                         * because if the sighand has been orphaned,
                         * the dequeue_signal() call is going to crash
                         * because ->sighand will be long gone.
                         */
                        break;
                }
        }

        remove_wait_queue(&ctx->wqh, &wait);
        __set_current_state(TASK_RUNNING);

        return ret;
}

/*
 * Returns a multiple of the size of a "struct signalfd_siginfo", zero if the
 * sighand we are attached to has been orphaned, or a negative error code.
 * The "count" parameter must be at least the size of a
 * "struct signalfd_siginfo".
 */
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
                             loff_t *ppos)
{
        struct signalfd_ctx *ctx = file->private_data;
        struct signalfd_siginfo __user *siginfo;
        int nonblock = file->f_flags & O_NONBLOCK;
        ssize_t ret, total = 0;
        siginfo_t info;

        count /= sizeof(struct signalfd_siginfo);
        if (!count)
                return -EINVAL;

        siginfo = (struct signalfd_siginfo __user *) buf;

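        /*
         * Only the first dequeue may block; once at least one siginfo has
         * been copied out, further dequeues are non-blocking so a partially
         * filled buffer is returned instead of stalling.
         */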
        do {
                ret = signalfd_dequeue(ctx, &info, nonblock);
                if (unlikely(ret <= 0))
                        break;
                ret = signalfd_copyinfo(siginfo, &info);
                if (ret < 0)
                        break;
                siginfo++;
                total += ret;
                nonblock = 1;
        } while (--count);

        return total ? total : ret;
}

static const struct file_operations signalfd_fops = {
        .release        = signalfd_release,
        .poll           = signalfd_poll,
        .read           = signalfd_read,
};

/*
 * Create a file descriptor that is associated with our signal
 * state. We can pass it around to others if we want to, but
 * it will always be _our_ signal state.
 */
asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask)
{
        int error;
        sigset_t sigmask;
        struct signalfd_ctx *ctx;
        struct sighand_struct *sighand;
        struct file *file;
        struct inode *inode;
        struct signalfd_lockctx lk;

        if (sizemask != sizeof(sigset_t) ||
            copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
                return error = -EINVAL;
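        /*
         * SIGKILL and SIGSTOP can never be fetched through a signalfd. The
         * mask is stored inverted so it can be handed straight to
         * next_signal()/dequeue_signal(), which skip signals whose bit is
         * set (the same convention as a blocked-signal mask).
         */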
        sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        signotset(&sigmask);

        if (ufd == -1) {
                ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        return -ENOMEM;

                init_waitqueue_head(&ctx->wqh);
                ctx->sigmask = sigmask;
                ctx->tsk = current;

                sighand = current->sighand;
                /*
                 * Add this fd to the list of signal listeners.
                 */
                spin_lock_irq(&sighand->siglock);
                list_add_tail(&ctx->lnk, &sighand->signalfd_list);
                spin_unlock_irq(&sighand->siglock);

                /*
                 * When we call this, the initialization must be complete, since
                 * anon_inode_getfd() will install the fd.
                 */
                error = anon_inode_getfd(&ufd, &inode, &file, "[signalfd]",
                                         &signalfd_fops, ctx);
                if (error)
                        goto err_fdalloc;
        } else {
                file = fget(ufd);
                if (!file)
                        return -EBADF;
                ctx = file->private_data;
                if (file->f_op != &signalfd_fops) {
                        fput(file);
                        return -EINVAL;
                }
                /*
                 * We need to be prepared for the fact that the sighand this fd
                 * is attached to has been detached. In that case signalfd_lock()
                 * will return 0, and we'll just skip setting the new mask.
                 */
                if (signalfd_lock(ctx, &lk)) {
                        ctx->sigmask = sigmask;
                        signalfd_unlock(&lk);
                }
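                /*
                 * Wake any sleeping readers and pollers so they re-check the
                 * pending signals against the new mask (or notice that the
                 * sighand has gone away).
                 */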
                wake_up(&ctx->wqh);
                fput(file);
        }

        return ufd;

err_fdalloc:
        signalfd_cleanup(ctx);
        return error;
}

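/*
 * A minimal sketch of the intended userspace usage, for illustration only
 * (not part of the kernel code). The signals of interest must be blocked
 * first so they stay pending instead of being delivered the usual way. The
 * sketch assumes the raw syscall is reached via syscall(2) with
 * __NR_signalfd, and that the third argument is the size of the kernel
 * sigset_t (8 bytes for 64 signals), which is what the sizemask check above
 * expects; a libc sigset_t may be larger. Field names match the
 * struct signalfd_siginfo members filled in by signalfd_copyinfo().
 *
 *      sigset_t mask;
 *      struct signalfd_siginfo si;
 *      int sfd;
 *
 *      sigemptyset(&mask);
 *      sigaddset(&mask, SIGINT);
 *      sigprocmask(SIG_BLOCK, &mask, NULL);
 *      sfd = syscall(__NR_signalfd, -1, &mask, 8);
 *      if (sfd >= 0 && read(sfd, &si, sizeof(si)) == sizeof(si))
 *              printf("got signal %u\n", si.signo);
 */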