/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);

        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* get lock context early so we can deal with alloc failures */
        req->wb_lock_context = nfs_get_lock_context(ctx);
        if (req->wb_lock_context == NULL) {
                nfs_page_free(req);
                return ERR_PTR(-ENOMEM);
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}
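
/*
 * Illustrative sketch (not part of the original source): a caller that
 * already holds the page lock and an open context would typically pair
 * nfs_create_request() with nfs_release_request(), e.g.
 *
 *      req = nfs_create_request(ctx, inode, page, 0, len);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      ...
 *      nfs_release_request(req);
 *
 * Keeping the page locked across the call is the caller's responsibility.
 */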

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
        if (!nfs_lock_request_dontget(req))
                return 0;
        if (test_bit(PG_MAPPED, &req->wb_flags))
                radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree,
                                   req->wb_index, NFS_PAGE_TAG_LOCKED);
        return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
        if (test_bit(PG_MAPPED, &req->wb_flags)) {
                struct inode *inode = req->wb_context->path.dentry->d_inode;
                struct nfs_inode *nfsi = NFS_I(inode);

                spin_lock(&inode->i_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index,
                                     NFS_PAGE_TAG_LOCKED);
                nfs_unlock_request(req);
                spin_unlock(&inode->i_lock);
        } else
                nfs_unlock_request(req);
}
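
/*
 * Illustrative sketch (not part of the original source): the tag-locking
 * helpers above are used in pairs. A scanner that wants exclusive access
 * to a request does roughly:
 *
 *      if (nfs_set_page_tag_locked(req)) {
 *              ... operate on the request ...
 *              nfs_clear_page_tag_locked(req);
 *      }
 *
 * nfs_clear_page_tag_locked() drops PG_BUSY via nfs_unlock_request(), which
 * wakes any process sleeping in nfs_wait_on_request().
 */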

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page, lock context and open context resources associated with
 * a read/write request after it has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_open_context *ctx = req->wb_context;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;

        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
        if (ctx != NULL) {
                put_nfs_open_context(ctx);
                req->wb_context = NULL;
        }
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release page, lock context and open context resources */
        nfs_clear_request(req);
        nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
        io_schedule();
        return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        return wait_on_bit(&req->wb_flags, PG_BUSY,
                           nfs_wait_bit_uninterruptible,
                           TASK_UNINTERRUPTIBLE);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_inode = inode;
        desc->pg_doio = doio;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
                                     struct nfs_page *req)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return 0;
        if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
                return 0;
        if (req->wb_context->state != prev->wb_context->state)
                return 0;
        if (req->wb_index != (prev->wb_index + 1))
                return 0;
        if (req->wb_pgbase != 0)
                return 0;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return 0;
        return 1;
}
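
/*
 * Illustrative note (not part of the original source): with the checks
 * above, two requests coalesce only when 'prev' fills its page completely
 * and 'req' starts at the very beginning of the next page, e.g.
 *
 *      prev: wb_index = 3, wb_pgbase = 0, wb_bytes = PAGE_CACHE_SIZE
 *      req:  wb_index = 4, wb_pgbase = 0
 *
 * and both carry the same credential, open state and lock owner.
 */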

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        size_t newlen = req->wb_bytes;

        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                /*
                 * FIXME: ideally we should be able to coalesce all requests
                 * that are not block boundary aligned, but currently this
                 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
                 * since nfs_flush_multi and nfs_pagein_multi assume you
                 * can have only one struct nfs_page.
                 */
                if (desc->pg_bsize < PAGE_SIZE)
                        return 0;
                newlen += desc->pg_count;
                if (newlen > desc->pg_bsize)
                        return 0;
                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req))
                        return 0;
        } else
                desc->pg_base = req->wb_pgbase;
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count = newlen;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_doio(desc->pg_inode,
                                          &desc->pg_list,
                                          nfs_page_array_len(desc->pg_base,
                                                             desc->pg_count),
                                          desc->pg_count,
                                          desc->pg_ioflags);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
        }
        return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        nfs_pageio_doio(desc);
}
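
/*
 * Illustrative sketch (not part of the original source): the read and write
 * paths drive the descriptor in three steps -- initialise it, feed it one
 * request per page, then flush whatever is still queued:
 *
 *      struct nfs_pageio_descriptor pgio;
 *
 *      nfs_pageio_init(&pgio, inode, doio, bsize, io_flags);
 *      list_for_each_entry_safe(req, tmp, &head, wb_list) {
 *              if (!nfs_pageio_add_request(&pgio, req))
 *                      break;
 *      }
 *      nfs_pageio_complete(&pgio);
 *
 * A zero return from nfs_pageio_add_request() means the flush failed and
 * pgio.pg_error holds the error. Here 'doio', 'bsize' and 'io_flags' stand
 * in for the caller's actual I/O routine, preferred transfer size and flags.
 */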

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);

                if (index != prev->wb_index + 1)
                        nfs_pageio_doio(desc);
        }
}

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
                  struct list_head *dst, pgoff_t idx_start,
                  unsigned int npages, int tag)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        pgoff_t idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES, tag);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (nfs_set_page_tag_locked(req)) {
                                kref_get(&req->wb_kref);
                                nfs_list_remove_request(req);
                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                                req->wb_index, tag);
                                nfs_list_add_request(req, dst);
                                res++;
                                if (res == INT_MAX)
                                        goto out;
                        }
                }
                /* for latency reduction */
                cond_resched_lock(&nfsi->vfs_inode.i_lock);
        }
out:
        return res;
}
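
/*
 * Illustrative sketch (not part of the original source, and the tag used
 * here is an assumption about the caller): a typical user holds the
 * inode's i_lock and asks for every tagged request on the inode, e.g. to
 * gather requests awaiting a COMMIT:
 *
 *      spin_lock(&inode->i_lock);
 *      res = nfs_scan_list(NFS_I(inode), &head, 0, 0, NFS_PAGE_TAG_COMMIT);
 *      spin_unlock(&inode->i_lock);
 *
 * Each request moved onto 'head' has been tag-locked and had its reference
 * count bumped, so the caller must eventually unlock and release it.
 */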

int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}