blob: 676073b8dda57659202b123dde8adb90e0fb416b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Directory notifications for Linux.
3 *
4 * Copyright (C) 2000,2001,2002 Stephen Rothwell
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16#include <linux/fs.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/dnotify.h>
20#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
Al Viro9f3acc32008-04-24 07:44:08 -040023#include <linux/fdtable.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
Eric Dumazetfa3536c2006-03-26 01:37:24 -080025int dir_notify_enable __read_mostly = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
Christoph Lametere18b8902006-12-06 20:33:20 -080027static struct kmem_cache *dn_cache __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
29static void redo_inode_mask(struct inode *inode)
30{
31 unsigned long new_mask;
32 struct dnotify_struct *dn;
33
34 new_mask = 0;
35 for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
36 new_mask |= dn->dn_mask & ~DN_MULTISHOT;
37 inode->i_dnotify_mask = new_mask;
38}
39
40void dnotify_flush(struct file *filp, fl_owner_t id)
41{
42 struct dnotify_struct *dn;
43 struct dnotify_struct **prev;
44 struct inode *inode;
45
Josef "Jeff" Sipek0f7fc9e2006-12-08 02:36:35 -080046 inode = filp->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 if (!S_ISDIR(inode->i_mode))
48 return;
49 spin_lock(&inode->i_lock);
50 prev = &inode->i_dnotify;
51 while ((dn = *prev) != NULL) {
52 if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
53 *prev = dn->dn_next;
54 redo_inode_mask(inode);
55 kmem_cache_free(dn_cache, dn);
56 break;
57 }
58 prev = &dn->dn_next;
59 }
60 spin_unlock(&inode->i_lock);
61}
62
/*
 * Backend for fcntl(fd, F_NOTIFY, arg): register, extend, or remove a
 * directory-notification request on the directory open at filp.
 *
 * arg with no event bits set (0, or DN_MULTISHOT alone) removes the
 * caller's registration.  Otherwise the bits in arg are OR-ed into an
 * existing entry for (current->files, filp), or a new entry is created.
 * Returns 0 on success or a negative errno.
 */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_struct *dn;
	struct dnotify_struct *odn;
	struct dnotify_struct **prev;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int error = 0;

	/* No event bits requested: treat as "remove my registration". */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		return 0;
	}
	if (!dir_notify_enable)
		return -EINVAL;
	inode = filp->f_path.dentry->d_inode;
	if (!S_ISDIR(inode->i_mode))
		return -ENOTDIR;
	/* Allocate before taking i_lock so we never sleep under it. */
	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
	if (dn == NULL)
		return -ENOMEM;
	spin_lock(&inode->i_lock);
	prev = &inode->i_dnotify;
	while ((odn = *prev) != NULL) {
		/* Existing entry for this owner/file: merge new bits in. */
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= arg;
			inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
			goto out_free;	/* pre-allocated dn is unused */
		}
		prev = &odn->dn_next;
	}

	/* Re-check that fd still refers to filp in our descriptor table. */
	rcu_read_lock();
	f = fcheck(fd);
	rcu_read_unlock();
	/* we'd lost the race with close(), sod off silently */
	/* note that inode->i_lock prevents reordering problems
	 * between accesses to descriptor table and ->i_dnotify */
	if (f != filp)
		goto out_free;

	/* Direct dnotify SIGIOs to the registering task. */
	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	if (error)
		goto out_free;

	dn->dn_mask = arg;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
	/* Push the new entry onto the head of the inode's list. */
	dn->dn_next = inode->i_dnotify;
	inode->i_dnotify = dn;
	spin_unlock(&inode->i_lock);

	/* Let the filesystem's own dir_notify hook see the request, if any. */
	if (filp->f_op && filp->f_op->dir_notify)
		return filp->f_op->dir_notify(filp, arg);
	return 0;

out_free:
	spin_unlock(&inode->i_lock);
	kmem_cache_free(dn_cache, dn);
	return error;
}
128
/*
 * Deliver a directory event to every registration on this inode whose
 * mask includes it: each matching watcher's owner is sent SIGIO
 * (POLL_MSG) tagged with its registered fd.  Entries without
 * DN_MULTISHOT are one-shot and are unlinked and freed after delivery;
 * the inode's summary mask is recomputed if any entry was removed.
 */
void __inode_dir_notify(struct inode *inode, unsigned long event)
{
	struct dnotify_struct * dn;
	struct dnotify_struct **prev;
	struct fown_struct * fown;
	int changed = 0;

	spin_lock(&inode->i_lock);
	prev = &inode->i_dnotify;
	while ((dn = *prev) != NULL) {
		/* Not interested in this event: skip to the next entry. */
		if ((dn->dn_mask & event) == 0) {
			prev = &dn->dn_next;
			continue;
		}
		fown = &dn->dn_filp->f_owner;
		send_sigio(fown, dn->dn_fd, POLL_MSG);
		if (dn->dn_mask & DN_MULTISHOT)
			prev = &dn->dn_next;
		else {
			/* One-shot entry: unlink in place and free it.
			 * prev is deliberately not advanced, so the next
			 * iteration examines the successor we spliced in. */
			*prev = dn->dn_next;
			changed = 1;
			kmem_cache_free(dn_cache, dn);
		}
	}
	if (changed)
		redo_inode_mask(inode);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(__inode_dir_notify);
159
/*
 * Notify watchers on the parent directory of dentry that event occurred.
 *
 * This is hopelessly wrong, but unfixable without API changes. At
 * least it doesn't oops the kernel...
 *
 * To safely access ->d_parent we need to keep d_move away from it. Use the
 * dentry's d_lock for this.
 */
void dnotify_parent(struct dentry *dentry, unsigned long event)
{
	struct dentry *parent;

	if (!dir_notify_enable)
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	if (parent->d_inode->i_dnotify_mask & event) {
		/* Pin the parent so it stays valid once d_lock is dropped;
		 * released with dput() after delivery. */
		dget(parent);
		spin_unlock(&dentry->d_lock);
		__inode_dir_notify(parent->d_inode, event);
		dput(parent);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
EXPORT_SYMBOL_GPL(dnotify_parent);
186
187static int __init dnotify_init(void)
188{
189 dn_cache = kmem_cache_create("dnotify_cache",
Paul Mundt20c2df82007-07-20 10:11:58 +0900190 sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 return 0;
192}
193
194module_init(dnotify_init)