/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_writeback_done_partial(struct nfs_write_data *, int);
static void nfs_writeback_done_full(struct nfs_write_data *, int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);

static kmem_cache_t *nfs_wdata_cachep;
mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

static inline struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

static inline void nfs_commit_free(struct nfs_write_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}

static void nfs_writedata_release(struct rpc_task *task)
{
	struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_writedata_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	i_size_write(inode, end);
}
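
/*
 * Worked example (added for illustration, not from the original source):
 * with PAGE_CACHE_SIZE == 4096, a write of count == 50 bytes at
 * offset == 100 into the page at index 2 yields
 * end = (2 << 12) + 150 = 8342.  If i_size was 8200, the file grows to
 * 8342; if i_size was already 9000, it is left untouched.
 */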

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}
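
/*
 * Worked example (added for illustration): with PAGE_CACHE_SIZE == 4096
 * and i_size == 10000, end_offs == 9999 and the last page has index 2.
 * That page holds (9999 & 4095) + 1 == 1808 bytes of data, so a write of
 * exactly 1808 bytes at base 0 covers the whole extent; the remaining
 * 2288 bytes of the page are zeroed and PG_uptodate is set.
 */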

/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc();
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	nfs_writedata_free(wdata);
	return written ? written : result;
}
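
/*
 * Illustration (added, not part of the original source): with
 * wsize == 1024 and count == 4096, the loop above issues four synchronous
 * WRITE calls, advancing args.offset and args.pgbase by each call's
 * result.  A short reply (result < args.count) simply shrinks the
 * remainder to be sent; only a negative result aborts the page.
 */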

static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	int status;

	req = nfs_update_request(ctx, inode, page, offset, count);
	status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
	if (status < 0)
		goto out;
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
 out:
	return status;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (err >= 0) {
			err = 0;
			if (wbc->for_reclaim)
				nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
		}
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	return err;
}
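
/*
 * Note (added for clarity): the BDI_write_congested bit acts as a simple
 * mutex here - only one flusher works the backing device at a time.
 * Anyone who finds the bit already set either bails out (nonblocking) or
 * sleeps on nfs_write_congestion until the current owner clears the bit
 * and calls wake_up_all() above.
 */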

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
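
/*
 * Note (added for clarity): the igrab()/nfs_begin_data_update() taken for
 * the first request on an inode are balanced by the iput() and
 * nfs_end_data_update() in nfs_inode_remove_request() below, once the
 * last request is gone.  The inode therefore cannot be evicted while it
 * still has requests in the radix tree.
 */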

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_page_state(nr_dirty);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_page_state(nr_unstable);
	mark_inode_dirty(inode);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	spin_lock(&nfsi->req_lock);
	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error < 0)
			return error;
		spin_lock(&nfsi->req_lock);
		res++;
	}
	spin_unlock(&nfsi->req_lock);
	return res;
}
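
/*
 * Note (added for clarity): npages == 0 means "no upper bound" - idx_end
 * becomes ~0, so the gang lookup walks every request tagged
 * NFS_PAGE_TAG_WRITEBACK from idx_start to the end of the file.  The
 * req_lock is dropped around each nfs_wait_on_request() call, which is
 * why the request is first pinned with atomic_inc(&req->wb_count).
 */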

/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		sub_page_state(nr_dirty, res);
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;
	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}


/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;
				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0)
					return ERR_PTR(error);
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, the page addresses don't match,
	 * or the regions don't overlap or abut, tell the caller to
	 * flush out the conflicting request and retry.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
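
/*
 * Merge example (added for illustration): an existing dirty request
 * covering bytes [1024, 2048) of the page, updated with a write of
 * [1536, 3072), passes the contiguity test (1536 <= 2048) and is widened
 * in place: wb_offset stays 1024 and wb_bytes becomes 3072 - 1024 = 2048.
 * A write of [3073, 4096) would instead return -EBUSY via the check
 * above, since offset > rqend.
 */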

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		count, (long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}
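
	/*
	 * Illustration (added, not part of the original source): with
	 * PAGE_CACHE_SIZE == 4096, a 10-byte write at offset 200 into an
	 * uptodate, unlocked interior page is widened here to a single
	 * request for the whole page (offset 0, count 4096), trading a
	 * little extra data on the wire for one contiguous request
	 * instead of many small fragments.
	 */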

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);

out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		unsigned int count, unsigned int offset,
		int how)
{
	struct rpc_task *task = &data->task;
	struct inode *inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;

	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_writedata_release;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		task->tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_writedata_alloc();
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	SetPageWriteback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;
		data->complete = nfs_writeback_done_partial;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
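
/*
 * Illustration (added, not part of the original source): with
 * wsize == 1024 and a 4096-byte dirty region, four nfs_write_data structs
 * are allocated, wb_complete is set to 4, and four WRITE calls go out
 * covering offsets 0, 1024, 2048 and 3072.  nfs_writeback_done_partial()
 * decrements wb_complete as each reply arrives, and the page is released
 * when it reaches zero.
 */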

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
		return nfs_flush_multi(head, inode, how);

	data = nfs_writedata_alloc();
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		SetPageWriteback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->complete = nfs_writeback_done_full;
	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

static int
nfs_flush_list(struct list_head *head, int wpages, int how)
{
	LIST_HEAD(one_request);
	struct nfs_page *req;
	int error = 0;
	unsigned int pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
{
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = status;
		dprintk(", error = %d\n", status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *        writebacks since the page->count is kept > 1 for as long
 *        as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
{
	struct nfs_page *req;
	struct page *page;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task)
{
	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}

	/*
	 * Process the nfs_page list
	 */
	data->complete(data, task->tk_status);
}
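
/*
 * Short-write example (added for illustration): if argp->count == 8192
 * but the server replies with resp->count == 4096 on a stable write, the
 * argument struct is advanced (offset += 4096, pgbase += 4096,
 * count == 4096) and the same RPC task is restarted to send the
 * remainder.  An unstable short write is instead retried from the start
 * as NFS_FILE_SYNC, so a server crash between the two halves cannot lose
 * data that was only acknowledged unstably.
 */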


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_commit_release(struct rpc_task *task)
{
	struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data, int how)
{
	struct rpc_task *task = &data->task;
	struct nfs_page *first;
	struct inode *inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_commit_release;

	dprintk("NFS: %4d initiated commit call\n", task->tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
void
nfs_commit_done(struct rpc_task *task)
{
	struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
	struct nfs_page *req;
	int res = 0;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
		res++;
	}
	sub_page_state(nr_unstable, res);
}
#endif
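
/*
 * Note (added for clarity): the write verifier is an opaque cookie that
 * an NFSv3/v4 server must change whenever it reboots.  If the verifier
 * returned by COMMIT differs from the one stored with the request at
 * WRITE time, the unstable data may have been lost in a server restart,
 * so the request is marked dirty and the data is sent again.
 */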

static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res, error = 0;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		struct nfs_server *server = NFS_SERVER(inode);

		/* For single writes, FLUSH_STABLE is more efficient */
		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
				how |= FLUSH_STABLE;
		}
		error = nfs_flush_list(&head, server->wpages, how);
	}
	if (error < 0)
		return error;
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res, error = 0;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		error = nfs_commit_list(&head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
		   unsigned int npages, int how)
{
	int error, wait;

	wait = how & FLUSH_WAIT;
	how &= ~FLUSH_WAIT;

	do {
		error = 0;
		if (wait)
			error = nfs_wait_on_requests(inode, idx_start, npages);
		if (error == 0)
			error = nfs_flush_inode(inode, idx_start, npages, how);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (error == 0)
			error = nfs_commit_inode(inode, how);
#endif
	} while (error > 0);
	return error;
}

int nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
					    mempool_alloc_slab,
					    mempool_free_slab,
					    nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	if (kmem_cache_destroy(nfs_wdata_cachep))
		printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
}