/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	if (flag)
		FD_SET(fd, files->close_on_exec);
	else
		FD_CLR(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	int res;
	spin_lock(&files->file_lock);
	res = FD_ISSET(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
	return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
		     struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < files->max_fdset) {
		newfd = find_next_zero_bit(files->open_fds->fds_bits,
					   files->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

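/*
 * dupfd - allocate a new descriptor (>= start) referring to @file.
 * On success the descriptor is marked open, has close-on-exec cleared,
 * is installed in the fd array and returned; on failure the reference
 * to @file is dropped and a negative errno is returned.
 */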
static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		FD_SET(fd, files->open_fds);
		FD_CLR(fd, files->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

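/*
 * dup2(oldfd, newfd): make newfd refer to the same open file as oldfd,
 * closing whatever newfd previously referred to (if anything).  Returns
 * newfd on success or a negative errno.
 */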
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	tofree = files->fd[newfd];
	if (!tofree && FD_ISSET(newfd, files->open_fds))
		goto out_fput;

	files->fd[newfd] = file;
	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

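/*
 * setfl() implements F_SETFL.  For reference, the usual userspace
 * pattern that ends up here (illustrative only, not part of this file):
 *
 *	int flags = fcntl(fd, F_GETFL, 0);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * Only the bits in SETFL_MASK can be changed this way.
 */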
static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}

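/*
 * Record @pid/@uid/@euid as the owner of @filp under f_owner.lock.
 * Unless @force is set, an already established owner is left alone.
 */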
static void f_modown(struct file *filp, unsigned long pid,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

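/*
 * For reference, the usual userspace sequence that arms SIGIO-driven
 * I/O on a descriptor (illustrative only, not part of this file):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	int flags = fcntl(fd, F_GETFL, 0);
 *	fcntl(fd, F_SETFL, flags | FASYNC);
 *
 * F_SETOWN is handled by f_setown() above, and flipping the FASYNC bit
 * is routed through setfl() to the driver's ->fasync() method.
 */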
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
			struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (arg < 0 || arg > _NSIG) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

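/*
 * On 32-bit architectures fcntl64() additionally understands the 64-bit
 * file locking commands (F_GETLK64/F_SETLK64/F_SETLKW64); every other
 * command is handled by do_fcntl() exactly as above.
 */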
#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
		case F_GETLK64:
			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
			break;
		case F_SETLK64:
		case F_SETLKW64:
			err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
			break;
		default:
			err = do_fcntl(fd, cmd, arg, filp);
			break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

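/*
 * May the owner recorded in @fown deliver @sig to task @p?  Allowed for
 * root and for matching uid/euid credentials, and only if the security
 * module does not object.
 */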
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			if ((reason & __SI_MASK) != __SI_POLL)
				BUG();
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

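/*
 * Deliver the SIGIO-style notification recorded in @fown (either plain
 * SIGIO or the signal chosen with F_SETSIG) to the owning task, or to
 * every task in the owning process group if fown->pid is negative.
 */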
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigio_to_task(p, fown, fd, band);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

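/*
 * Deliver SIGURG (urgent/out-of-band data) to the owner, or owning
 * process group, recorded in @fown.  Returns non-zero if an owner was
 * registered, zero otherwise.
 */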
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigurg_to_task(p, fown);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
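
/*
 * Minimal sketch of how a character driver typically uses this helper
 * (illustrative only; the mydev_* names are hypothetical and not part
 * of this file):
 *
 *	static struct fasync_struct *mydev_async_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_async_queue);
 *	}
 *
 * When new data arrives the driver notifies the queue with
 *
 *	kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
 *
 * and its ->release() method calls mydev_fasync(-1, filp, 0) so the
 * entry is dropped when the file is closed.
 */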

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)