/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

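/*
 * Set or clear the close-on-exec bit for @fd in the current process's
 * descriptor table; get_close_on_exec() below reads it back.
 */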
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	if (flag)
		FD_SET(fd, files->close_on_exec);
	else
		FD_CLR(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	int res;
	spin_lock(&files->file_lock);
	res = FD_ISSET(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
	return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
		     struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < files->max_fdset) {
		newfd = find_next_zero_bit(files->open_fds->fds_bits,
					   files->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

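/*
 * Install @file at the lowest free descriptor at or above @start.
 * On success the new fd is returned; on failure the reference to
 * @file is dropped and a negative error is returned.
 */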
static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		FD_SET(fd, files->open_fds);
		FD_CLR(fd, files->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

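/*
 * sys_dup2() duplicates oldfd onto newfd, closing whatever newfd
 * previously referred to.  If oldfd equals newfd, the call succeeds
 * without touching the descriptor table.  Returns newfd or a
 * negative error.
 */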
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	tofree = files->fd[newfd];
	if (!tofree && FD_ISSET(newfd, files->open_fds))
		goto out_fput;

	files->fd[newfd] = file;
	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

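/*
 * Apply F_SETFL: only the flags in SETFL_MASK may be changed.
 * Clearing O_APPEND on an append-only inode and setting O_NOATIME
 * without owning the file are rejected; O_DIRECT requires the
 * file's mapping to support direct I/O.
 */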
static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
	unlock_kernel();
	return error;
}

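/*
 * Update the SIGIO owner recorded in @filp->f_owner.  Unless @force
 * is set, an already-established owner is left alone.
 */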
static void f_modown(struct file *filp, unsigned long pid,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

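/*
 * Dispatch a single fcntl command.  The caller holds a reference on
 * @filp; unknown commands return -EINVAL.
 */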
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		     struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				    (struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

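/*
 * May @fown deliver signal @sig to task @p?  The file owner's uid or
 * euid must match the target's uid or suid (or the owner must be
 * root), and the security module must not object.
 */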
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = fown->signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		if ((reason & __SI_MASK) != __SI_POLL)
			BUG();
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!send_group_sig_info(fown->signum, &si, p))
			break;
		/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

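/*
 * Deliver SIGIO (or the signal chosen with F_SETSIG) to the owner
 * recorded in @fown: a single task for a positive pid, every member
 * of the process group for a negative one.
 */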
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigio_to_task(p, fown, fd, band);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

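/*
 * Send SIGURG (out-of-band data notification) to the recorded owner.
 * Returns nonzero if an owner was set, whether or not any signal was
 * actually delivered.
 */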
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigurg_to_task(p, fown);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it
 * made no changes and positive if it added/deleted the entry.
 */
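/*
 * A typical caller is a driver's ->fasync() file operation; a minimal
 * sketch (the "scull_dev"-style names are hypothetical):
 *
 *	static int scull_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct scull_dev *dev = filp->private_data;
 *
 *		return fasync_helper(fd, filp, on, &dev->async_queue);
 *	}
 */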
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

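/*
 * Walk the fasync list and raise the notification signal for each
 * registered file.  Callers must serialize against list changes,
 * normally by holding fasync_lock as kill_fasync() below does.
 */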
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

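/*
 * A driver that registered readers via fasync_helper() notifies them
 * like this when new data arrives (a sketch; "dev" is a hypothetical
 * driver structure):
 *
 *	if (dev->async_queue)
 *		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 */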
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)