/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
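/*
 * Illustrative sketch (not part of this file): a netfs would typically call
 * the fscache_maybe_release_page() wrapper from its ->releasepage() handler
 * to ask whether a PG_fscache page may be freed.  The examplefs names below
 * are hypothetical; only the fscache calls are real.
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = examplefs_page_cookie(page);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;	// cache is still busy with the page
 *		return 1;		// page may be released
 *	}
 */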

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
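/*
 * Illustrative sketch (not part of this file): after changing an inode's
 * attributes (e.g. on truncate or setattr), a netfs would normally notify the
 * cache through the fscache_attr_changed() wrapper so the backing object can
 * be resized.  The examplefs names are hypothetical; only the fscache call is
 * real.
 *
 *	static void examplefs_setattr_done(struct inode *inode)
 *	{
 *		struct fscache_cookie *cookie = examplefs_inode_cookie(inode);
 *
 *		// -ENOBUFS/-ENOMEM from the cache are not fatal to the netfs
 *		fscache_attr_changed(cookie);
 *	}
 */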

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM       - out of memory, nothing done
 *   -ERESTARTSYS  - interrupted
 *   -ENOBUFS      - no backing object available in which to cache the block
 *   -ENODATA      - no data available in the backing object for this block
 *   0             - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
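/*
 * Illustrative sketch (not part of this file): a netfs ->readpage() would
 * usually go through the fscache_read_or_alloc_page() wrapper and fall back
 * to reading from the server unless the cache dispatched the I/O.  The
 * examplefs names are hypothetical; only the fscache call is real.
 *
 *	static int examplefs_readpage_from_cache(struct fscache_cookie *cookie,
 *						 struct page *page)
 *	{
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(cookie, page,
 *						 examplefs_read_done, NULL,
 *						 GFP_KERNEL);
 *		switch (ret) {
 *		case 0:		// read dispatched; examplefs_read_done() runs later
 *			return 0;
 *		case -ENOBUFS:	// no cache backing; read from the server
 *		case -ENODATA:	// block allocated but empty; read from the server
 *		default:
 *			return 1;
 *		}
 *	}
 */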

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM       - out of memory, some pages may be being read
 *   -ERESTARTSYS  - interrupted, some pages may be being read
 *   -ENOBUFS      - no backing object or space available in which to cache
 *                   any pages not being read
 *   -ENODATA      - no data available in the backing object for some or all
 *                   of the pages
 *   0             - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM       - out of memory, nothing done
 *   -ERESTARTSYS  - interrupted
 *   -ENOBUFS      - no backing object available in which to cache the block
 *   0             - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op);
        _leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        while (spin_lock(&cookie->stores_lock),
               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                              ARRAY_SIZE(results),
                                              FSCACHE_COOKIE_PENDING_TAG),
               n > 0) {
                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        spin_unlock(&cookie->stores_lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM       - out of memory, nothing done
 *   -ENOBUFS      - no backing object available in which to cache the page
 *   0             - dispatched a write - the page will be stored in the
 *                   cache in the background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
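/*
 * Illustrative sketch (not part of this file): once a page has been read in
 * and marked with fscache_mark_page_cached(), a netfs would typically hand it
 * to the cache via the fscache_write_page() wrapper and drop its caching
 * interest if the store cannot be queued.  The examplefs name is
 * hypothetical; only the fscache calls are real.
 *
 *	static void examplefs_write_to_cache(struct fscache_cookie *cookie,
 *					     struct page *page)
 *	{
 *		if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *			// -ENOMEM/-ENOBUFS: give up on caching this page
 *			fscache_uncache_page(cookie, page);
 *	}
 */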

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        printk(KERN_WARNING "FS-Cache:"
                               " Cookie type %s marked page %lx"
                               " multiple times\n",
                               cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);