/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.flush		= afs_flush,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct key *key;
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return PTR_ERR(key);
	}

	ret = afs_validate(vnode, key);
	if (ret < 0) {
		_leave(" = %d [val]", ret);
		return ret;
	}

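	/* Stash the key in the file for subsequent reads and writes;
	 * afs_release() will put it.
	 */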
	file->private_data = key;
	_leave(" = 0");
	return 0;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key_put(file->private_data);
	_leave(" = 0");
	return 0;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (atomic_dec_and_test(&req->usage)) {
		for (i = 0; i < req->nr_pages; i++)
			if (req->pages[i])
				put_page(req->pages[i]);
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
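		/* Allocate a read request with room for a single page
		 * pointer; afs_put_read() will drop the page ref and free it.
		 */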
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page. If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		atomic_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, req);
		afs_put_read(req);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

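			/* Retryable errors leave the page !Uptodate and
			 * merely unlocked; anything else is treated as a hard
			 * I/O error and marks the page in error.
			 */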
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

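	/* Use the key attached by afs_open() if there's an open file;
	 * otherwise get a key for the cell just for this read.
	 */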
	if (file) {
		key = file->private_data;
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = call->reply;
#endif
	struct page *page = req->pages[req->index];

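	/* Clear the slot so that afs_put_read() won't put this page a second
	 * time; our ref is dropped below once the page has been unlocked.
	 */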
	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = file->private_data;
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list. Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = list_entry(pages->prev, struct page, lru);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

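	/* Allocate a request with enough page slots to cover the whole
	 * contiguous run counted above.
	 */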
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;

	/* Transfer the pages to the request. We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run).
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank. We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_vnode_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = file->private_data;
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

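	/* Any pages still on the list couldn't be satisfied from the cache;
	 * fetch them from the server in contiguous runs.
	 */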
	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}

/*
 * write back a dirty page
 */
static int afs_launder_page(struct page *page)
{
	_enter("{%lu}", page->index);

	return 0;
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
#endif

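		/* Only detach and put the writeback record if the page isn't
		 * currently under writeback.
		 */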
		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}