/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

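/*
 * nfs_pgarray_set - set up the page vector of an nfs_page_array
 * @p: pointer to the nfs_page_array to initialise
 * @pagecount: number of pages the array needs to hold
 *
 * Uses the embedded page_array when @pagecount fits, otherwise allocates
 * a larger vector with kcalloc().  Returns true on success, false if the
 * allocation failed (in which case npages is reset to 0).
 */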
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

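/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: pageio descriptor the header is being built from
 * @hdr: the nfs_pgio_header to initialise
 * @release: callback invoked when the header is released
 *
 * Copies the request, inode, credential and byte-count information from
 * @desc into @hdr and hands the header to the completion ops' init_hdr
 * callback, if one is defined.
 */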
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

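/*
 * nfs_set_pgio_error - record an I/O error against an nfs_pgio_header
 * @hdr: header to update
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Truncates hdr->good_bytes so that it only covers the data before @pos
 * and flags the header as having seen an error.  Errors at offsets beyond
 * the already-truncated good_bytes range are ignored.
 */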
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read/write
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_page    = page;
	req->wb_index   = page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to the request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to the request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to the request being cleared
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

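/*
 * Action routine for wait_on_bit(): simply schedule I/O and retry.
 * Because it never checks for pending signals, the resulting wait is
 * uninterruptible.
 */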
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

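/*
 * nfs_generic_pg_test - default pg_test for request coalescing
 * @desc: destination io descriptor
 * @prev: previous request in the descriptor's list
 * @req: request being considered for coalescing
 *
 * Returns true if adding @req keeps the total byte count within the
 * descriptor's block size (pg_bsize).  Coalescing is refused entirely
 * when pg_bsize is smaller than PAGE_SIZE (see the FIXME below).
 */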
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return 0;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner.l_owner != prev->wb_lock_context->lockowner.l_owner)
		return false;
	if (req->wb_lock_context->lockowner.l_pid != prev->wb_lock_context->lockowner.l_pid)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

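/*
 * nfs_do_recoalesce - drain the descriptor's list and re-add its requests
 * @desc: pointer to io descriptor
 *
 * Re-runs coalescing on the requests already queued on @desc after
 * pg_recoalesce has been set.  Returns 0 if an error was hit, 1 otherwise.
 */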
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

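/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Like __nfs_pageio_add_request(), but re-coalesces the descriptor's
 * list whenever pg_recoalesce has been set.  Returns true if the request
 * was added, false on error.
 */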
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}