/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking, lifetime, and refcounting
 *
 * REFCNT:
 * The mark->refcnt tells how many "things" in the kernel currently are
 * referencing this object.  The object typically will live inside the kernel
 * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task
 * which can find this object while holding the appropriate locks can take a
 * reference, and the object itself is guaranteed to survive until that
 * reference is dropped.
 *
 * LOCKING:
 * There are 3 spinlocks involved with fsnotify inode marks and they MUST
 * be taken in order as follows:
 *
 * entry->lock
 * group->mark_lock
 * inode->i_lock
 *
 * entry->lock protects 2 things, entry->group and entry->inode.  You must hold
 * that lock to dereference either of these things (they could be NULL even
 * with the lock held).
 *
 * group->mark_lock protects the mark_entries list anchored inside a given
 * group and each entry is hooked via the g_list.  It also loosely protects the
 * free_g_list, which when used is anchored by a private list on the stack of
 * the task which held the group->mark_lock.
 *
 * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
 * given inode and each entry is hooked via the i_list (and loosely the
 * free_i_list).
 *
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
 * - The inode is being evicted from cache.  (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark_by_entry)
 * - The fsnotify_group associated with the mark is going away and all such
 *   marks need to be cleaned up.  (fsnotify_clear_marks_by_group)
 *
 * Worst case we are given an inode and need to clean up all the marks on that
 * inode.  We take i_lock and walk the i_fsnotify_mark_entries list safely.
 * For each mark on the list we take a reference (so the mark can't disappear
 * under us).  We remove that mark from the inode's list of marks and we add it
 * to a private list anchored on the stack using free_i_list.  At this point we
 * no longer fear anything finding the mark via the inode's list of marks.
 *
 * We can safely and locklessly run the private list on the stack of everything
 * we just unattached from the original inode.  For each mark on the private
 * list we grab the mark->lock and can thus dereference mark->group and
 * mark->inode.  If we see the group and inode are not NULL we take those
 * locks.  Now holding all 3 locks we can completely remove the mark from other
 * tasks finding it in the future.  Remember, 10 things might already be
 * referencing this mark, but they had better be holding a ref.  We drop the
 * reference we took before we unhooked it from the inode.  When the ref hits
 * 0 we can free the mark.
 *
 * Freeing by group works very similarly, except we use free_g_list.
 *
 * This has the very interesting property of being able to run concurrently
 * with any (or all) other teardown directions.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
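
/*
 * The function below is purely illustrative and not part of the original
 * file: a minimal sketch of the lock ordering documented in the header
 * comment above.  The function name is hypothetical.  It shows that
 * entry->lock is taken first, then group->mark_lock, then inode->i_lock,
 * and that entry->group and entry->inode may only be dereferenced while
 * entry->lock is held (and may still be NULL even then).
 */
static void example_take_locks_in_order(struct fsnotify_mark_entry *entry)
{
	struct fsnotify_group *group;
	struct inode *inode;

	spin_lock(&entry->lock);

	/* only valid to read under entry->lock; either may be NULL */
	group = entry->group;
	inode = entry->inode;

	if (group && inode) {
		spin_lock(&group->mark_lock);
		spin_lock(&inode->i_lock);

		/* ... work requiring all three locks ... */

		spin_unlock(&inode->i_lock);
		spin_unlock(&group->mark_lock);
	}

	spin_unlock(&entry->lock);
}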

void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
{
	atomic_inc(&entry->refcnt);
}

void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
{
	if (atomic_dec_and_test(&entry->refcnt))
		entry->free_mark(entry);
}
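
/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * file): anything that finds a mark on a list while holding the appropriate
 * lock takes a reference before using it, and drops that reference when done.
 */
static void example_use_mark(struct fsnotify_mark_entry *entry)
{
	fsnotify_get_mark(entry);	/* the mark cannot be freed under us now */

	/* ... safely use the mark ... */

	fsnotify_put_mark(entry);	/* may end up calling entry->free_mark() */
}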

/*
 * Recalculate the mask of events relevant to a given inode.  The caller must
 * be holding inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark_entry *entry;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
		new_mask |= entry->mask;
	inode->i_fsnotify_mask = new_mask;
}

/*
 * Recalculate inode->i_fsnotify_mask, the mask of all FS_* event types that
 * any notifier is interested in hearing about for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);
}
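
/*
 * Illustrative sketch only (hypothetical backend code, not part of the
 * original file): after a backend changes a mark's mask it must recalculate
 * the inode's aggregate mask so the right events keep being delivered.
 * FS_MODIFY is one of the FS_* event types from fsnotify_backend.h.  Taking
 * entry->lock around the mask update is an assumption of this sketch; the
 * header comment only documents that lock as protecting group and inode.
 */
static void example_update_mark_mask(struct fsnotify_mark_entry *entry,
				     struct inode *inode)
{
	spin_lock(&entry->lock);
	entry->mask |= FS_MODIFY;
	spin_unlock(&entry->lock);

	/* i_lock nests under entry->lock, so calling this here is safe */
	fsnotify_recalc_inode_mask(inode);
}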

/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't
 * actually do the final put under entry->lock.
 */
void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
{
	struct fsnotify_group *group;
	struct inode *inode;

	spin_lock(&entry->lock);

	group = entry->group;
	inode = entry->inode;

	BUG_ON(group && !inode);
	BUG_ON(!group && inode);

	/* if !group something else already marked this to die */
	if (!group) {
		spin_unlock(&entry->lock);
		return;
	}

	/* 1 from caller and 1 for being on i_list/g_list */
	BUG_ON(atomic_read(&entry->refcnt) < 2);

	spin_lock(&group->mark_lock);
	spin_lock(&inode->i_lock);

	hlist_del_init(&entry->i_list);
	entry->inode = NULL;

	list_del_init(&entry->g_list);
	entry->group = NULL;

	fsnotify_put_mark(entry); /* for i_list and g_list */

	/*
	 * this mark is now off the inode->i_fsnotify_mark_entries list and we
	 * hold the inode->i_lock, so this is the perfect time to update the
	 * inode->i_fsnotify_mask
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
	spin_unlock(&group->mark_lock);
	spin_unlock(&entry->lock);

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this entry
	 * is being freed.
	 */
	group->ops->freeing_mark(entry, group);

	/*
	 * It's possible that this group tried to destroy itself, but this
	 * mark was simultaneously being freed via the inode.  If that's the
	 * case, we finish freeing the group here.
	 */
	if (unlikely(atomic_dec_and_test(&group->num_marks)))
		fsnotify_final_destroy_group(group);
}

/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	struct fsnotify_mark_entry *lentry, *entry;
	LIST_HEAD(free_list);

	spin_lock(&group->mark_lock);
	list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
		list_add(&entry->free_g_list, &free_list);
		list_del_init(&entry->g_list);
		fsnotify_get_mark(entry);
	}
	spin_unlock(&group->mark_lock);

	list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
		fsnotify_destroy_mark_by_entry(entry);
		fsnotify_put_mark(entry);
	}
}

/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	struct fsnotify_mark_entry *entry, *lentry;
	struct hlist_node *pos, *n;
	LIST_HEAD(free_list);

	spin_lock(&inode->i_lock);
	hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
		list_add(&entry->free_i_list, &free_list);
		hlist_del_init(&entry->i_list);
		fsnotify_get_mark(entry);
	}
	spin_unlock(&inode->i_lock);

	list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
		fsnotify_destroy_mark_by_entry(entry);
		fsnotify_put_mark(entry);
	}
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * If found take a reference to that mark and return it, else return NULL.
 * The caller must hold inode->i_lock.
 */
struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
						     struct inode *inode)
{
	struct fsnotify_mark_entry *entry;
	struct hlist_node *pos;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
		if (entry->group == group) {
			fsnotify_get_mark(entry);
			return entry;
		}
	}
	return NULL;
}
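
/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * file): fsnotify_find_mark_entry() must be called with inode->i_lock held
 * and, on success, returns the mark with a reference held which the caller
 * must eventually drop.
 */
static int example_group_watches_inode(struct fsnotify_group *group,
				       struct inode *inode)
{
	struct fsnotify_mark_entry *entry;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);

	if (!entry)
		return 0;

	/* ... inspect entry->mask or similar ... */

	fsnotify_put_mark(entry);	/* drop the reference the lookup took */
	return 1;
}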

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
			void (*free_mark)(struct fsnotify_mark_entry *entry))
{
	spin_lock_init(&entry->lock);
	atomic_set(&entry->refcnt, 1);
	INIT_HLIST_NODE(&entry->i_list);
	entry->group = NULL;
	entry->mask = 0;
	entry->inode = NULL;
	entry->free_mark = free_mark;
}

/*
 * Attach an initialized mark entry to a given group and inode.
 * The fsnotify backend uses these marks to determine which event types
 * should be delivered to which groups for which inodes.
 */
int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
		      struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark_entry *lentry;
	int ret = 0;

	/*
	 * LOCKING ORDER!!!!
	 * entry->lock
	 * group->mark_lock
	 * inode->i_lock
	 */
	spin_lock(&entry->lock);
	spin_lock(&group->mark_lock);
	spin_lock(&inode->i_lock);

	entry->group = group;
	entry->inode = inode;

	lentry = fsnotify_find_mark_entry(group, inode);
	if (!lentry) {
		hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
		list_add(&entry->g_list, &group->mark_entries);

		fsnotify_get_mark(entry); /* for i_list and g_list */

		atomic_inc(&group->num_marks);

		fsnotify_recalc_inode_mask_locked(inode);
	}

	spin_unlock(&inode->i_lock);
	spin_unlock(&group->mark_lock);
	spin_unlock(&entry->lock);

	if (lentry) {
		ret = -EEXIST;
		fsnotify_put_mark(lentry);
	}

	return ret;
}
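
/*
 * Illustrative sketch only (hypothetical backend, not part of the original
 * file): the typical lifecycle of a mark.  A backend allocates a mark,
 * initializes it with a callback that frees the memory when the refcnt hits
 * zero, attaches it, and later destroys it and drops its own reference.  The
 * names example_free_mark/example_watch_inode are invented for the example.
 */
static void example_free_mark(struct fsnotify_mark_entry *entry)
{
	kfree(entry);
}

static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	int ret;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	fsnotify_init_mark(entry, example_free_mark);	/* refcnt starts at 1 */
	entry->mask = mask;

	ret = fsnotify_add_mark(entry, group, inode);
	if (ret) {
		/* -EEXIST: this group already has a mark on this inode */
		fsnotify_put_mark(entry);	/* frees via example_free_mark */
		return ret;
	}

	/* ... later, to remove the watch again ... */
	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);		/* drop the initial reference */
	return 0;
}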