/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

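/**
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the first byte of the range being written
 * @to: Offset of the end of the range being written
 *
 * Walks every buffer head attached to @page and, for each buffer that
 * overlaps the byte range being written, adds it to the running
 * transaction as journaled data (marking it uptodate first in the
 * jdata case).
 */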
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

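/*
 * Variant of gfs2_get_block_noalloc() used for direct I/O: it maps existing
 * blocks only (@create is ignored, gfs2_block_map() is called with create ==
 * 0) and, unlike the noalloc variant, does not turn an unmapped block into
 * -EIO.
 */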
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the caller should go on to write the page, or 0 if the
 *          page has already been dealt with here (redirtied or
 *          invalidated) and unlocked.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

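	/*
	 * Reserve journal space for the whole batch up front: one block per
	 * file system block that these pages may dirty, plus the same number
	 * of revokes.
	 */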
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
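	/*
	 * For data-integrity writeback, pages are tagged TOWRITE up front
	 * (via tag_pages_for_writeback() below) so that pages dirtied while
	 * we sweep the mapping cannot make the loop run forever.
	 */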
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
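	/*
	 * For WB_SYNC_ALL, flush the journal and then sweep the mapping a
	 * second time, so that anything which only reached the journal on
	 * the first pass is also written back in place before we return.
	 */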
447 if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
Benjamin Marzinski24972552014-05-01 22:26:55 -0500448 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100449 ret = gfs2_write_cache_jdata(mapping, wbc);
450 }
451 return ret;
452}
453
454/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000455 * stuffed_readpage - Fill in a Linux page with stuffed file data
456 * @ip: the inode
457 * @page: the page
458 *
459 * Returns: errno
460 */
461
462static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
463{
464 struct buffer_head *dibh;
Steven Whitehouse602c89d2010-03-25 14:32:43 +0000465 u64 dsize = i_size_read(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000466 void *kaddr;
467 int error;
468
Steven Whitehousebf126ae2007-04-20 09:18:30 +0100469 /*
Nick Piggin3c18ddd2008-04-28 02:12:10 -0700470 * Due to the order of unstuffing files and ->fault(), we can be
Steven Whitehousebf126ae2007-04-20 09:18:30 +0100471 * asked for a zero page in the case of a stuffed file being extended,
472 * so we need to supply one here. It doesn't happen often.
473 */
474 if (unlikely(page->index)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300475 zero_user(page, 0, PAGE_SIZE);
Abhijith Das0a7ab792009-01-07 16:03:37 -0600476 SetPageUptodate(page);
Steven Whitehousebf126ae2007-04-20 09:18:30 +0100477 return 0;
478 }
Steven Whitehousefd88de562006-05-05 16:59:11 -0400479
David Teiglandb3b94fa2006-01-16 16:50:04 +0000480 error = gfs2_meta_inode_buffer(ip, &dibh);
481 if (error)
482 return error;
483
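	/*
	 * The stuffed data sits in the inode block immediately after the
	 * dinode header, and is never larger than gfs2_max_stuffed_size();
	 * copy it into the page and zero the remainder.
	 */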
	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

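	/*
	 * Read through the page cache one page at a time: __gfs2_readpage()
	 * brings each page uptodate (the caller already holds the glock),
	 * and the requested range is copied out of the kmapped page.
	 */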
	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
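	/*
	 * Writes to the rindex also update the statfs files, so take the
	 * statfs inode's glock here as well; it is released again in
	 * gfs2_write_end() / gfs2_stuffed_write_end().
	 */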
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

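	/*
	 * Work out the worst-case journal reservation for this write: the
	 * dinode, any indirect blocks, the data blocks themselves in jdata
	 * mode, statfs/quota changes, and the resource group headers touched
	 * by an allocation.
	 */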
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > gfs2_max_stuffed_size(ip)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: the number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + len > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Marking the page "checked" tells the jdata writeback path that the
 * page's buffers may still need to be added to a transaction when the
 * page is written out (see __gfs2_jdata_writepage()).
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

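	/*
	 * First pass: under the log and AIL locks, check that none of the
	 * page's buffers are still in use (referenced, attached to an active
	 * transaction, dirty or pinned). If any are, the page cannot be
	 * released.
	 */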
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}