/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

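/**
 * gfs2_page_add_databufs - mark a range of a page's buffers as journaled data
 * @ip: The inode
 * @page: The page
 * @from: Offset of the start of the byte range within the page
 * @len: Length of the byte range
 *
 * Walks the page's buffer heads and, for each buffer that overlaps the
 * range [@from, @from + @len), marks it uptodate and adds it to the
 * current transaction via gfs2_trans_add_data().
 */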
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

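/*
 * Unlike gfs2_get_block_noalloc() above, an unmapped result is not an
 * error here: direct I/O is allowed to see holes. Neither variant ever
 * allocates; @create is ignored and gfs2_block_map() is called with
 * create == 0.
 */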
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

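	/*
	 * Write the page out without attaching buffer heads where possible;
	 * nobh_writepage() falls back to the buffer-based write path when
	 * the page already has buffers.
	 */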
	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated with the index of the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

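	/*
	 * Reserve journal space for every block in the pagevec before any
	 * page lock is taken; see gfs2_write_cache_jdata() for why the
	 * transaction has to be started first.
	 */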
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

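	/*
	 * For data integrity sync (WB_SYNC_ALL or tagged_writepages), pages
	 * are tagged TOWRITE up front so that pages dirtied while we walk
	 * the mapping cannot cause the loop to live-lock.
	 */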
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						    end, tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

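	/*
	 * A stuffed inode holds its data in the dinode block itself,
	 * directly after the struct gfs2_dinode header. Copy it into the
	 * page, capped at the maximum stuffed size, and zero the rest.
	 */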
	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, since in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

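	/*
	 * Work out the journal reservation: the dinode itself, any indirect
	 * blocks, the data blocks when journaling data, statfs and quota
	 * changes, and the resource group bitmap blocks touched when
	 * allocating.
	 */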
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > gfs2_max_stuffed_size(ip)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (alloc_required) {
		gfs2_inplace_release(ip);
		if (pos + len > ip->i_inode.i_size)
			gfs2_trim_blocks(&ip->i_inode);
	}
	goto out_qunlock;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_qunlock:
	if (alloc_required)
		gfs2_quota_unlock(ip);
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We just put our locking around the VFS
 * provided functions.
 *
 * Returns: copied bytes or errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret))
		goto out;

	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
		page = NULL;
		goto out2;
	}

	if (gfs2_is_jdata(ip))
		gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
	else
		gfs2_ordered_add_inode(ip);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	page = NULL;
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

out2:
	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
out:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

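/**
 * gfs2_discard - detach a buffer from the journal before it is invalidated
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Drops the buffer's bufdata from its journal list when it is safe to do
 * so (not pinned), otherwise leaves it to gfs2_remove_from_journal(), and
 * clears the buffer's mapping state.
 */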
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O.
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

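/**
 * gfs2_direct_IO - direct I/O for reads and writes
 * @iocb: The I/O control block
 * @iter: The source or destination of the data
 *
 * Returns: the number of bytes transferred, an errno, or zero to make
 *          the caller fall back to buffered I/O (see gfs2_ok_for_dio()).
 */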
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_ail_lock);

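	/*
	 * No buffer on this page is busy, pinned or part of an active
	 * transaction, so strip the journal bookkeeping (bufdata) from
	 * each buffer before handing the page to try_to_free_buffers().
	 */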
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

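/*
 * One address_space_operations table per data journaling mode. The
 * ordered and jdata tables hook ->set_page_dirty, and the jdata table
 * has no ->direct_IO, since journaled data must pass through the log.
 */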
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}