/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object);

/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
	struct fscache_cookie *cookie = _cookie;

	memset(cookie, 0, sizeof(*cookie));
	spin_lock_init(&cookie->lock);
	spin_lock_init(&cookie->stores_lock);
	INIT_HLIST_HEAD(&cookie->backing_objects);
}

/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	void *netfs_data)
{
	struct fscache_cookie *cookie;

	BUG_ON(!def);

	_enter("{%s},{%s},%p",
	       parent ? (char *) parent->def->name : "<no-parent>",
	       def->name, netfs_data);

	fscache_stat(&fscache_n_acquires);

	/* if there's no parent cookie, then we don't create one here either */
	if (!parent) {
		fscache_stat(&fscache_n_acquires_null);
		_leave(" [no parent]");
		return NULL;
	}

	/* validate the definition */
	BUG_ON(!def->get_key);
	BUG_ON(!def->name[0]);

	BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
	       parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

	/* allocate and initialise a cookie */
	cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		_leave(" [ENOMEM]");
		return NULL;
	}

	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* We keep the active count elevated until relinquishment to prevent an
	 * attempt to wake up every time the object operations queue quiesces.
	 */
	atomic_set(&cookie->n_active, 1);

	atomic_inc(&parent->usage);
	atomic_inc(&parent->n_children);

	cookie->def = def;
	cookie->parent = parent;
	cookie->netfs_data = netfs_data;
	cookie->flags = 0;

	/* radix tree insertion won't use the preallocation pool unless it's
	 * told it may not wait */
	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

	switch (cookie->def->type) {
	case FSCACHE_COOKIE_TYPE_INDEX:
		fscache_stat(&fscache_n_cookie_index);
		break;
	case FSCACHE_COOKIE_TYPE_DATAFILE:
		fscache_stat(&fscache_n_cookie_data);
		break;
	default:
		fscache_stat(&fscache_n_cookie_special);
		break;
	}

	/* if the object is an index then we need do nothing more here - we
	 * create indices on disk when we need them as an index may exist in
	 * multiple caches */
	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (fscache_acquire_non_index_cookie(cookie) < 0) {
			atomic_dec(&parent->n_children);
			__fscache_cookie_put(cookie);
			fscache_stat(&fscache_n_acquires_nobufs);
			_leave(" = NULL");
			return NULL;
		}
	}

	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = %p", cookie);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
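
#if 0	/* illustrative sketch only -- not part of this file */
/*
 * A minimal sketch of the netfs side of cookie acquisition, using
 * hypothetical "examplefs" names.  Only the definition fields that the code
 * above actually dereferences (name, type, get_key, get_attr) are shown; the
 * callback implementations and the parent (server/index) cookie are assumed
 * to exist elsewhere.  Real netfs code would normally go through the
 * fscache_acquire_cookie() wrapper in include/linux/fscache.h rather than
 * calling the __fscache_acquire_cookie() export directly.
 */
struct examplefs_inode {
	struct fscache_cookie	*fscache;	/* per-file cookie, or NULL */
	/* ... netfs-specific fields ... */
};

static const struct fscache_cookie_def examplefs_file_cookie_def = {
	.name		= "examplefs.file",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= examplefs_cookie_get_key,	/* copies the object key */
	.get_attr	= examplefs_cookie_get_attr,	/* reports the file size */
};

static void examplefs_init_fscache(struct examplefs_inode *ei)
{
	/* a NULL return simply means "no caching"; errors are never exposed */
	ei->fscache = __fscache_acquire_cookie(examplefs_server_cookie,
					       &examplefs_file_cookie_def,
					       ei);
}
#endif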

/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	uint64_t i_size;
	int ret;

	_enter("");

	cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;

	/* now we need to see whether the backing objects for this cookie exist
	 * yet; if not, there'll be nothing to search */
	down_read(&fscache_addremove_sem);

	if (list_empty(&fscache_cache_list)) {
		up_read(&fscache_addremove_sem);
		_leave(" = 0 [no caches]");
		return 0;
	}

	/* select a cache in which to store the object */
	cache = fscache_select_cache_for_object(cookie->parent);
	if (!cache) {
		up_read(&fscache_addremove_sem);
		fscache_stat(&fscache_n_acquires_no_cache);
		_leave(" = -ENOMEDIUM [no cache]");
		return -ENOMEDIUM;
	}

	_debug("cache %s", cache->tag->name);

	cookie->flags =
		(1 << FSCACHE_COOKIE_LOOKING_UP) |
		(1 << FSCACHE_COOKIE_NO_DATA_YET);

	/* ask the cache to allocate objects for this cookie and its parent
	 * chain */
	ret = fscache_alloc_object(cache, cookie);
	if (ret < 0) {
		up_read(&fscache_addremove_sem);
		_leave(" = %d", ret);
		return ret;
	}

	/* pass on how big the object we're caching is supposed to be */
	cookie->def->get_attr(cookie->netfs_data, &i_size);

	spin_lock(&cookie->lock);
	if (hlist_empty(&cookie->backing_objects)) {
		spin_unlock(&cookie->lock);
		goto unavailable;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	fscache_set_store_limit(object, i_size);

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

	spin_unlock(&cookie->lock);

	/* we may be required to wait for lookup to complete at this point */
	if (!fscache_defer_lookup) {
		_debug("non-deferred lookup %p", &cookie->flags);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("complete");
		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
			goto unavailable;
	}

	up_read(&fscache_addremove_sem);
	_leave(" = 0 [deferred]");
	return 0;

unavailable:
	up_read(&fscache_addremove_sem);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	int ret;

	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);

	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, &cookie->backing_objects,
			     cookie_link) {
		if (object->cache == cache)
			goto object_already_extant;
	}
	spin_unlock(&cookie->lock);

	/* ask the cache to allocate an object (we may end up with duplicate
	 * objects at this stage, but we sort that out later) */
	fscache_stat(&fscache_n_cop_alloc_object);
	object = cache->ops->alloc_object(cache, cookie);
	fscache_stat_d(&fscache_n_cop_alloc_object);
	if (IS_ERR(object)) {
		fscache_stat(&fscache_n_object_no_alloc);
		ret = PTR_ERR(object);
		goto error;
	}

	fscache_stat(&fscache_n_object_alloc);

	object->debug_id = atomic_inc_return(&fscache_object_debug_id);

	_debug("ALLOC OBJ%x: %s {%lx}",
	       object->debug_id, cookie->def->name, object->events);

	ret = fscache_alloc_object(cache, cookie->parent);
	if (ret < 0)
		goto error_put;

	/* only attach if we managed to allocate all we needed, otherwise
	 * discard the object we just allocated and instead use the one
	 * attached to the cookie */
	if (fscache_attach_object(cookie, object) < 0) {
		fscache_stat(&fscache_n_cop_put_object);
		cache->ops->put_object(object);
		fscache_stat_d(&fscache_n_cop_put_object);
	}

	_leave(" = 0");
	return 0;

object_already_extant:
	ret = -ENOBUFS;
	if (fscache_object_is_dead(object)) {
		spin_unlock(&cookie->lock);
		goto error;
	}
	spin_unlock(&cookie->lock);
	_leave(" = 0 [found]");
	return 0;

error_put:
	fscache_stat(&fscache_n_cop_put_object);
	cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object)
{
	struct fscache_object *p;
	struct fscache_cache *cache = object->cache;
	int ret;

	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

	spin_lock(&cookie->lock);

	/* there may be multiple initial creations of this object, but we only
	 * want one */
	ret = -EEXIST;
	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p))
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
	}

	/* pin the parent object */
	spin_lock_nested(&cookie->parent->lock, 1);
	hlist_for_each_entry(p, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p)) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
			}
			object->parent = p;
			spin_lock(&p->lock);
			p->n_children++;
			spin_unlock(&p->lock);
			break;
		}
	}
	spin_unlock(&cookie->parent->lock);

	/* attach to the cache's object list */
	if (list_empty(&object->cache_link)) {
		spin_lock(&cache->object_list_lock);
		list_add(&object->cache_link, &cache->object_list);
		spin_unlock(&cache->object_list_lock);
	}

	/* attach to the cookie */
	object->cookie = cookie;
	atomic_inc(&cookie->usage);
	hlist_add_head(&object->cookie_link, &cookie->backing_objects);

	fscache_objlist_add(object);
	ret = 0;

cant_attach_object:
	spin_unlock(&cookie->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	_enter("{%s}", cookie->def->name);

	fscache_stat(&fscache_n_invalidates);

	/* Only permit invalidation of data files.  Invalidating an index will
	 * require the caller to release all its attachments to the tree rooted
	 * there, and if it's doing that, it may as well just retire the
	 * cookie.
	 */
	ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	/* We will be updating the cookie too. */
	BUG_ON(!cookie->def->get_aux);

	/* If there's an object, we tell the object state machine to handle the
	 * invalidation on our behalf, otherwise there's nothing to do.
	 */
	if (!hlist_empty(&cookie->backing_objects)) {
		spin_lock(&cookie->lock);

		if (!hlist_empty(&cookie->backing_objects) &&
		    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
				      &cookie->flags)) {
			object = hlist_entry(cookie->backing_objects.first,
					     struct fscache_object,
					     cookie_link);
			if (fscache_object_is_live(object))
				fscache_raise_event(
					object, FSCACHE_OBJECT_EV_INVALIDATE);
		}

		spin_unlock(&cookie->lock);
	}

	_leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
	_enter("%p", cookie);

	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
		    fscache_wait_bit_interruptible,
		    TASK_UNINTERRUPTIBLE);

	_leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
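
#if 0	/* illustrative sketch only -- not part of this file */
/*
 * Sketch of how a netfs might drive the two routines above when it learns
 * that the server's copy of a data file has changed (the "examplefs" names
 * from the earlier sketch are assumptions).  Invalidation may be requested
 * with spinlocks held; the wait must be done later, from a context that can
 * sleep, before further cache reads are issued.  Real code would use the
 * fscache_invalidate()/fscache_wait_on_invalidate() wrappers.
 */
static void examplefs_note_remote_change(struct examplefs_inode *ei)
{
	if (ei->fscache)
		__fscache_invalidate(ei->fscache);	/* safe under spinlock */
}

static void examplefs_prepare_to_read(struct examplefs_inode *ei)
{
	if (ei->fscache)
		__fscache_wait_on_invalidate(ei->fscache);	/* may sleep */
}
#endif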

/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	fscache_stat(&fscache_n_updates);

	if (!cookie) {
		fscache_stat(&fscache_n_updates_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("{%s}", cookie->def->name);

	BUG_ON(!cookie->def->get_aux);

	spin_lock(&cookie->lock);

	/* update the index entry on disk in each cache backing this cookie */
	hlist_for_each_entry(object,
			     &cookie->backing_objects, cookie_link) {
		fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
	}

	spin_unlock(&cookie->lock);
	_leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
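
#if 0	/* illustrative sketch only -- not part of this file */
/*
 * Sketch of a caller of the routine above: when the data returned by the
 * netfs's get_aux() coherency callback changes (a modification time, say),
 * the netfs asks for the index entries backing the cookie to be rewritten.
 * Names follow the earlier hypothetical "examplefs" sketch; real code would
 * use the fscache_update_cookie() wrapper.
 */
static void examplefs_attrs_changed(struct examplefs_inode *ei)
{
	if (ei->fscache)
		__fscache_update_cookie(ei->fscache);
}
#endif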

/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
	struct fscache_object *object;

	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	if (!cookie) {
		fscache_stat(&fscache_n_relinquishes_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("%p{%s,%p,%d},%d",
	       cookie, cookie->def->name, cookie->netfs_data,
	       atomic_read(&cookie->n_active), retire);

	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

	if (atomic_read(&cookie->n_children) != 0) {
		printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
		       cookie->def->name);
		BUG();
	}

	/* No further netfs-accessing operations on this cookie permitted */
	set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
	if (retire)
		set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);

	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
		fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
	}
	spin_unlock(&cookie->lock);

	/* Wait for cessation of activity requiring access to the netfs (when
	 * n_active reaches 0).
	 */
	if (!atomic_dec_and_test(&cookie->n_active))
		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);

	/* Clear pointers back to the netfs */
	cookie->netfs_data = NULL;
	cookie->def = NULL;
	BUG_ON(cookie->stores.rnode);

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
		ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
		atomic_dec(&cookie->parent->n_children);
	}

	/* Dispose of the netfs's link to the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie);

	_leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
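
#if 0	/* illustrative sketch only -- not part of this file */
/*
 * Sketch of cookie release from a netfs's inode-eviction path (hypothetical
 * "examplefs" names as before).  Passing retire == 1 additionally marks the
 * backing object recyclable on disk; all dependent cookies must already have
 * been relinquished, as the routine above asserts.  Real code would use the
 * fscache_relinquish_cookie() wrapper.
 */
static void examplefs_evict_inode(struct examplefs_inode *ei, int deleting)
{
	__fscache_relinquish_cookie(ei->fscache, deleting);
	ei->fscache = NULL;
}
#endif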

/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
	struct fscache_cookie *parent;

	_enter("%p", cookie);

	for (;;) {
		_debug("FREE COOKIE %p", cookie);
		parent = cookie->parent;
		BUG_ON(!hlist_empty(&cookie->backing_objects));
		kmem_cache_free(fscache_cookie_jar, cookie);

		if (!parent)
			break;

		cookie = parent;
		BUG_ON(atomic_read(&cookie->usage) <= 0);
		if (!atomic_dec_and_test(&cookie->usage))
			break;
	}

	_leave("");
}