/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake-up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

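	/* The waitqueue sees every wake-up on this page, so ignore anything
	 * that isn't the PG_locked bit of the backing page being cleared.
	 */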
	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here; otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again, and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

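	/* Look the index up again to check that the page we hold is still the
	 * one installed in the backing mapping.
	 */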
	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

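	/* If we can lock the page now, the read must have completed, failed
	 * or never started, so decide synchronously rather than waiting.
	 */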
	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the op->to_do list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

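	/* Copy at most a small batch of monitors per invocation so that one
	 * retrieval cannot monopolise the workqueue thread; if more remain,
	 * the op is requeued below.
	 */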
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

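		/* The backing page has been unlocked, so it is either up to
		 * date (copy it), in error (fail the read), or was truncated
		 * and the read must be reissued; a cookie that is being
		 * invalidated fails the read with -ESTALE.
		 */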
	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the backing-file page corresponding to the given netfs page
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

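	/* Loop because someone else may install a page at this index between
	 * our lookup failing and our insertion attempt (-EEXIST), in which
	 * case we retry the lookup and keep the spare page for reuse.
	 */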
	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
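	/* e.g. with 4KB pages (PAGE_SHIFT == 12) on a filesystem using 1KB
	 * blocks (s_blocksize_bits == 10), shift == 2, so page index N
	 * corresponds to filesystem block N << 2.
	 */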

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the backing-file pages corresponding to the given set of netfs pages
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

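		/* as in cachefiles_read_backing_file_one(), loop because
		 * someone else may install a backing page at this index
		 * between our lookup and our insertion attempt (-EEXIST) */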
		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
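			/* -EEXIST means the netfs page is already in the
			 * pagecache, so another read beat us to it: drop our
			 * ref and count this page as complete */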
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
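	/* Sort the pages: those with a backing block are moved to backpages
	 * to be read in; the rest stay on the caller's list and, if there is
	 * space, are batched into the pagevec to be marked as cached
	 * (pagevec_add() returns 0 once the vector is full, triggering a
	 * flush; any remainder is flushed after the loop).
	 */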
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

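		/* batch the pages up via the pagevec and mark each full
		 * batch as cached; the partial batch left over is flushed
		 * below */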
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

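	/* write the page contents synchronously through the page's kernel
	 * mapping; a short or failed write is treated as an I/O error */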
	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

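	/* the cookie lock annotated by __releases() above is held by our
	 * caller on entry; dropping it is all this implementation needs to
	 * do */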
	spin_unlock(&object->fscache.cookie->lock);
}