/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

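/*
 * Illustrative sketch (not part of this file): a netfs would normally reach
 * the two helpers above through the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers in linux/fscache.h, e.g. before
 * modifying a page that may still be on its way to the cache ("cookie" is
 * assumed to be the netfs's data-file cookie):
 *
 *	if (fscache_check_page_write(cookie, page))
 *		fscache_wait_on_page_write(cookie, page);
 */
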
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

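/*
 * Illustrative sketch (not part of this file): vmscan reaches the function
 * above through the fscache_maybe_release_page() wrapper, typically from a
 * netfs's ->releasepage() handler.  The "mynetfs" names are hypothetical:
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;
 *		return 1;
 *	}
 *
 * Returning 0 tells the VM that the cache is still busy with the page and
 * that it cannot be released yet.
 */
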
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_set_op_state(op, "CallFS");
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_set_op_state(op, "Done");
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

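/*
 * Illustrative sketch (not part of this file): a netfs calls the
 * fscache_attr_changed() wrapper when the attributes of the backing file
 * change, for instance from its ->setattr() handler once a truncate has
 * been accepted by the server (the "mynetfs" helpers are hypothetical):
 *
 *	ret = mynetfs_do_setattr(dentry, attr);
 *	if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
 *		fscache_attr_changed(mynetfs_dentry_cookie(dentry));
 */
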
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	fscache_set_op_name(&op->op, "RetrRA1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

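/*
 * Illustrative sketch (not part of this file): a netfs drives this through
 * the fscache_read_or_alloc_page() wrapper from its ->readpage() handler and
 * falls back to a server read on -ENODATA or -ENOBUFS.  The "mynetfs" names
 * are hypothetical; the callback matches fscache_rw_complete_t:
 *
 *	static void mynetfs_readpage_complete(struct page *page, void *ctx,
 *					      int error)
 *	{
 *		if (!error)
 *			SetPageUptodate(page);
 *		unlock_page(page);
 *	}
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 mynetfs_readpage_complete, ctx,
 *					 GFP_KERNEL);
 *	if (ret == 0)
 *		return 0;
 *	if (ret == -ENODATA || ret == -ENOBUFS)
 *		return mynetfs_readpage_from_server(file, page);
 *	return ret;
 */
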
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrRAN");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

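/*
 * Illustrative sketch (not part of this file): the corresponding wrapper,
 * fscache_read_or_alloc_pages(), suits a netfs's ->readpages() handler.
 * Pages the cache takes on are removed from the list and *nr_pages drops
 * accordingly; anything left over must still be fetched from the server
 * (the "mynetfs" helpers are hypothetical):
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_readpage_complete, ctx,
 *					  mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;
 *	return mynetfs_readpages_from_server(file, mapping, pages, nr_pages);
 */
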
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrAL1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

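/*
 * Illustrative sketch (not part of this file): the fscache_alloc_page()
 * wrapper lets a netfs reserve cache space for a page it is about to fill
 * itself rather than read; one possible sequence, assuming the backend
 * marks the page cached as part of allocation, would be:
 *
 *	ret = fscache_alloc_page(cookie, page, GFP_KERNEL);
 *	if (ret == 0)
 *		ret = fscache_write_page(cookie, page, GFP_KERNEL);
 */
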
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	fscache_set_op_state(&op->op, "GetPage");

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_set_op_state(&op->op, "Store");
	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_set_op_state(&op->op, "EndWrite");
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_set_op_state(&op->op, "Abort");
		fscache_abort_object(object);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - the page will be stored in the
 *		  background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

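/*
 * Illustrative sketch (not part of this file): a netfs typically calls the
 * fscache_write_page() wrapper once a page carrying PG_fscache holds valid
 * data, e.g. after a read from the server completes; if the write cannot be
 * queued, the page should be given back with fscache_uncache_page():
 *
 *	if (PageFsCache(page)) {
 *		ret = fscache_write_page(cookie, page, GFP_KERNEL);
 *		if (ret != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 */
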
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

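/*
 * Illustrative sketch (not part of this file): a netfs pairs this, via the
 * fscache_uncache_page() wrapper, with page invalidation, waiting out any
 * store still in flight first.  The handler name and cookie lookup are
 * hypothetical:
 *
 *	static void mynetfs_invalidatepage(struct page *page,
 *					   unsigned long offset)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (offset == 0 && PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *	}
 */
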
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
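
/*
 * Illustrative sketch (not part of this file): a cache backend batches the
 * pages it has committed through a pagevec and hands them to
 * fscache_mark_pages_cached() from its I/O path; a minimal single-page
 * batch might look like this (assuming "op" and "page" are in scope):
 *
 *	struct pagevec cached_pvec;
 *
 *	pagevec_init(&cached_pvec, 0);
 *	pagevec_add(&cached_pvec, page);
 *	fscache_mark_pages_cached(op, &cached_pvec);
 */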