/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

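	/* Set up a one-page read request.  The extra page ref taken here is
	 * transferred to the request and dropped again when the request is
	 * released by afs_put_read().
	 */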
	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
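	/* The start of the dirty region is packed into the bottom bits of
	 * page->private (masked by AFS_PRIV_MAX) and the end into the top
	 * bits (shifted by AFS_PRIV_SHIFT) - hence the need for a 64-bit
	 * field once PAGE_SIZE exceeds 32768.
	 */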

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

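	/* Check i_size unlocked first, then recheck under wb_lock so that
	 * racing extenders of the file size serialise correctly.
	 */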
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

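	/* A security failure probably means the key we used has expired or
	 * been revoked, so step on to the next writeback key on the vnode's
	 * list and retry the store with that instead.
	 */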
	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page || to < PAGE_SIZE)
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			if (to != PAGE_SIZE)
				break;
			page = pages[loop];
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

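			/* A page can only be appended to the batch if its
			 * dirty region starts at offset 0, so that the
			 * stored extent stays contiguous with the previous
			 * page's data.
			 */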
			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

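	/* For cyclic writeback, resume from where the last pass stopped,
	 * wrap around to the start of the file if there is still write
	 * quota left, and record where to pick up next time.
	 */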
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	/* The freeze protection taken by sb_start_pagefault() above must be
	 * dropped again on the retry paths.
	 */
	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

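	/* If the page is dirty, synchronously store just the region recorded
	 * in page->private back to the server before letting the page go.
	 */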
	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}