/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened.  When inotify gets an event it will need to add that
 * event to the group notify queue.  Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue.  This
 * event_holder has a pointer back to the original event.  Since the majority
 * of events are going to end up on one, and only one, notification queue we
 * embed one event_holder into each event.  This means we have a single
 * allocation instead of always needing two.  If the embedded event_holder is
 * already in use by another group a new event_holder (from
 * fsnotify_event_holder_cachep) will be allocated and used.
 */
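
#if 0
/*
 * Illustrative sketch only, never compiled: one way a backend such as
 * inotify might fan a single event out to two groups using the functions
 * defined below.  The names example_fan_out, grp_a and grp_b are
 * hypothetical.  The first add can reuse the holder embedded in the event;
 * the second finds that holder busy and allocates another one from
 * fsnotify_event_holder_cachep.
 */
static void example_fan_out(struct fsnotify_group *grp_a,
                            struct fsnotify_group *grp_b,
                            struct inode *inode, __u32 mask)
{
        struct fsnotify_event *event;

        event = fsnotify_create_event(inode, mask, NULL, FSNOTIFY_EVENT_NONE);
        if (!event)
                return;

        /* likely uses event->holder, the embedded holder */
        fsnotify_add_notify_event(grp_a, event);
        /* the embedded holder is now on grp_a's list, so this allocates */
        fsnotify_add_notify_event(grp_b, event);

        /* drop the creation reference; each queue took its own */
        fsnotify_put_event(event);
}
#endif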

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static struct kmem_cache *fsnotify_event_cachep;
static struct kmem_cache *fsnotify_event_holder_cachep;
/*
 * This is a magic event we send when the queue is too full.  Since it doesn't
 * hold real event information, we just keep one system-wide and use it any
 * time it is needed.  Its refcnt is set to 1 at kernel init time and will
 * never reach 0, so it will never get 'freed'.
 */
static struct fsnotify_event q_overflow_event;

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
        BUG_ON(!mutex_is_locked(&group->notification_mutex));
        return list_empty(&group->notification_list);
}

void fsnotify_get_event(struct fsnotify_event *event)
{
        atomic_inc(&event->refcnt);
}

void fsnotify_put_event(struct fsnotify_event *event)
{
        if (!event)
                return;

        if (atomic_dec_and_test(&event->refcnt)) {
                if (event->data_type == FSNOTIFY_EVENT_PATH)
                        path_put(&event->path);

                kmem_cache_free(fsnotify_event_cachep, event);
        }
}

struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
{
        return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
}

void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
        kmem_cache_free(fsnotify_event_holder_cachep, holder);
}

/*
 * Check whether two events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
        if ((old->mask == new->mask) &&
            (old->to_tell == new->to_tell) &&
            (old->data_type == new->data_type)) {
                switch (old->data_type) {
                case (FSNOTIFY_EVENT_INODE):
                        if (old->inode == new->inode)
                                return true;
                        break;
                case (FSNOTIFY_EVENT_PATH):
                        if ((old->path.mnt == new->path.mnt) &&
                            (old->path.dentry == new->path.dentry))
                                return true;
                        break;
                case (FSNOTIFY_EVENT_NONE):
                        return true;
                }
        }
        return false;
}

/*
 * Add an event to the group notification queue.  The group can later pull this
 * event off the queue to deal with.  If the event is successfully added to the
 * group's notification queue, a reference is taken on the event.
 */
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
        struct fsnotify_event_holder *holder = NULL;
        struct list_head *list = &group->notification_list;
        struct fsnotify_event_holder *last_holder;
        struct fsnotify_event *last_event;

        /*
         * There is one fsnotify_event_holder embedded inside each
         * fsnotify_event.  Check if we expect to be able to use that holder.
         * If not, alloc a new holder.
         * For the overflow event it's possible that something will use the
         * embedded event holder before we get the lock, so we may need to
         * jump back and alloc a new holder; this can't happen for most
         * events...
         */
        if (!list_empty(&event->holder.event_list)) {
alloc_holder:
                holder = fsnotify_alloc_event_holder();
                if (!holder)
                        return -ENOMEM;
        }

        mutex_lock(&group->notification_mutex);

        if (group->q_len >= group->max_events)
                event = &q_overflow_event;

        spin_lock(&event->lock);

        if (list_empty(&event->holder.event_list)) {
                if (unlikely(holder))
                        fsnotify_destroy_event_holder(holder);
                holder = &event->holder;
        } else if (unlikely(!holder)) {
                /*
                 * Between the time we checked above and got the lock, the
                 * embedded event holder was used; go back and get a new one.
                 */
                spin_unlock(&event->lock);
                mutex_unlock(&group->notification_mutex);
                goto alloc_holder;
        }

        if (!list_empty(list)) {
                last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
                last_event = last_holder->event;
                if (event_compare(last_event, event)) {
                        spin_unlock(&event->lock);
                        mutex_unlock(&group->notification_mutex);
                        if (holder != &event->holder)
                                fsnotify_destroy_event_holder(holder);
                        return 0;
                }
        }

        group->q_len++;
        holder->event = event;

        fsnotify_get_event(event);
        list_add_tail(&holder->event_list, list);
        spin_unlock(&event->lock);
        mutex_unlock(&group->notification_mutex);

        wake_up(&group->notification_waitq);
        return 0;
}

/*
 * Remove and return the first event from the notification list.  There is a
 * reference held on this event since it was on the list.  It is the
 * responsibility of the caller to drop this reference.
 */
struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event;
        struct fsnotify_event_holder *holder;

        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);

        event = holder->event;

        spin_lock(&event->lock);
        holder->event = NULL;
        list_del_init(&holder->event_list);
        spin_unlock(&event->lock);

        /* holder == &event->holder means we are referenced through the embedded holder */
        if (holder != &event->holder)
                fsnotify_destroy_event_holder(holder);

        group->q_len--;

        return event;
}

/*
 * This will not remove the event; that must be done with
 * fsnotify_remove_notify_event().
 */
struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event;
        struct fsnotify_event_holder *holder;

        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
        event = holder->event;

        return event;
}
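
#if 0
/*
 * Illustrative sketch only, never compiled: how a group's read side might
 * drain one event using the helpers above.  example_get_one_event is a
 * hypothetical helper, not part of this file's API.  The notification_mutex
 * must be held across the emptiness check and the removal; afterwards the
 * caller owns the list's reference and must drop it with
 * fsnotify_put_event().
 */
static struct fsnotify_event *example_get_one_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event = NULL;

        mutex_lock(&group->notification_mutex);
        if (!fsnotify_notify_queue_is_empty(group))
                event = fsnotify_remove_notify_event(group);
        mutex_unlock(&group->notification_mutex);

        return event;   /* caller: fsnotify_put_event(event) when done */
}
#endif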

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
        struct fsnotify_event *event;

        mutex_lock(&group->notification_mutex);
        while (!fsnotify_notify_queue_is_empty(group)) {
                event = fsnotify_remove_notify_event(group);
                fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
        }
        mutex_unlock(&group->notification_mutex);
}

static void initialize_event(struct fsnotify_event *event)
{
        event->holder.event = NULL;
        INIT_LIST_HEAD(&event->holder.event_list);
        atomic_set(&event->refcnt, 1);

        spin_lock_init(&event->lock);

        event->path.dentry = NULL;
        event->path.mnt = NULL;
        event->inode = NULL;
        event->data_type = FSNOTIFY_EVENT_NONE;

        event->to_tell = NULL;
}

/*
 * fsnotify_create_event - Allocate a new event which will be sent to each
 * group's handle_event function if the group was interested in this
 * particular event.
 *
 * @to_tell the inode which is supposed to receive the event (sometimes a
 *      parent of the inode to which the event happened)
 * @mask what actually happened
 * @data pointer to the object which was actually affected
 * @data_type flag indicating whether the data is a file, path, inode, nothing...
 */
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
                                             void *data, int data_type)
{
        struct fsnotify_event *event;

        event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
        if (!event)
                return NULL;

        initialize_event(event);
        event->to_tell = to_tell;

        switch (data_type) {
        case FSNOTIFY_EVENT_FILE: {
                struct file *file = data;
                struct path *path = &file->f_path;
                event->path.dentry = path->dentry;
                event->path.mnt = path->mnt;
                path_get(&event->path);
                event->data_type = FSNOTIFY_EVENT_PATH;
                break;
        }
        case FSNOTIFY_EVENT_PATH: {
                struct path *path = data;
                event->path.dentry = path->dentry;
                event->path.mnt = path->mnt;
                path_get(&event->path);
                event->data_type = FSNOTIFY_EVENT_PATH;
                break;
        }
        case FSNOTIFY_EVENT_INODE:
                event->inode = data;
                event->data_type = FSNOTIFY_EVENT_INODE;
                break;
        case FSNOTIFY_EVENT_NONE:
                event->inode = NULL;
                event->path.dentry = NULL;
                event->path.mnt = NULL;
                break;
        default:
                BUG();
        }

        event->mask = mask;

        return event;
}

__init int fsnotify_notification_init(void)
{
        fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
        fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);

        initialize_event(&q_overflow_event);
        q_overflow_event.mask = FS_Q_OVERFLOW;

        return 0;
}
subsys_initcall(fsnotify_notification_init);