/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_bdev;
        else
                return mp->m_ddev_targp->bt_bdev;
}
70
Christoph Hellwig0829c362005-09-02 16:58:49 +100071/*
Christoph Hellwigf6d6d4f2006-01-11 15:40:13 +110072 * We're now finished for good with this ioend structure.
73 * Update the page state via the associated buffer_heads,
74 * release holds on the inode and bio, and finally free
75 * up memory. Do not use the ioend after this.
76 */
Christoph Hellwig0829c362005-09-02 16:58:49 +100077STATIC void
78xfs_destroy_ioend(
79 xfs_ioend_t *ioend)
80{
Christoph Hellwigf6d6d4f2006-01-11 15:40:13 +110081 struct buffer_head *bh, *next;
82
83 for (bh = ioend->io_buffer_head; bh; bh = next) {
84 next = bh->b_private;
Nathan Scott7d04a332006-06-09 14:58:38 +100085 bh->b_end_io(bh, !ioend->io_error);
Christoph Hellwigf6d6d4f2006-01-11 15:40:13 +110086 }
Christoph Hellwig583fa582008-12-03 12:20:38 +010087
Christoph Hellwig0829c362005-09-02 16:58:49 +100088 mempool_free(ioend, xfs_ioend_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_d.di_size;
}
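
/*
 * Worked example (a sketch, not from the original source): with
 * i_d.di_size == 8192, an ioend covering io_offset == 8192 and
 * io_size == 4096 yields 12288 > 8192, so the write is treated as a
 * possible append and a setfilesize transaction is reserved for it.
 * An ioend lying entirely below 8192 is not.
 */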

STATIC int
xfs_setfilesize_trans_alloc(
        struct xfs_ioend        *ioend)
{
        struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
        struct xfs_trans        *tp;
        int                     error;

        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        ioend->io_append_trans = tp;

        /*
         * We may pass freeze protection with a transaction.  So tell lockdep
         * we released it.
         */
        __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
        /*
         * We hand off the transaction to the completion thread now, so
         * clear the flag here.
         */
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
        return 0;
}
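
/*
 * Handoff lifecycle sketch: the submission thread allocates and
 * reserves the transaction (taking freeze protection and setting
 * PF_FSTRANS), then immediately tells lockdep the freeze reference was
 * dropped and clears PF_FSTRANS, because the transaction will be
 * committed or cancelled later by a completion worker.  The worker
 * re-asserts both in xfs_setfilesize_ioend() before using it.
 */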

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
        struct xfs_inode        *ip,
        struct xfs_trans        *tp,
        xfs_off_t               offset,
        size_t                  size)
{
        xfs_fsize_t             isize;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }

        trace_xfs_setfilesize(ip, offset, size);

        ip->i_d.di_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        return xfs_trans_commit(tp);
}

STATIC int
xfs_setfilesize_ioend(
        struct xfs_ioend        *ioend)
{
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_trans        *tp = ioend->io_append_trans;

        /*
         * The transaction may have been allocated in the I/O submission thread,
         * thus we need to mark ourselves as being in a transaction manually.
         * Similarly for freeze protection.
         */
        current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
        __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

        /* we abort the update if there was an IO error */
        if (ioend->io_error) {
                xfs_trans_cancel(tp);
                return ioend->io_error;
        }

        return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
        struct xfs_ioend        *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining)) {
                struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;

                if (ioend->io_type == XFS_IO_UNWRITTEN)
                        queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
                else if (ioend->io_append_trans)
                        queue_work(mp->m_data_workqueue, &ioend->io_work);
                else
                        xfs_destroy_ioend(ioend);
        }
}
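
/*
 * Reference counting sketch: io_remaining starts at 1 when the ioend is
 * allocated, and each bio built for it takes another reference in
 * xfs_submit_ioend_bio().  For example, an ioend submitted as two bios
 * holds a count of 3 after submission; the submitter's final
 * xfs_finish_ioend() call drops it to 2 and each bio completion drops
 * one more, so only the last completion queues the work or frees the
 * ioend.
 */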

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
        struct work_struct *work)
{
        xfs_ioend_t     *ioend = container_of(work, xfs_ioend_t, io_work);
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        int             error = 0;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                ioend->io_error = -EIO;
                goto done;
        }

        /*
         * For unwritten extents we need to issue transactions to convert a
         * range to normal written extents after the data I/O has finished.
         * Detecting and handling completion IO errors is done individually
         * for each case as different cleanup operations need to be performed
         * on error.
         */
        if (ioend->io_type == XFS_IO_UNWRITTEN) {
                if (ioend->io_error)
                        goto done;
                error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
                                                  ioend->io_size);
        } else if (ioend->io_append_trans) {
                error = xfs_setfilesize_ioend(ioend);
        } else {
                ASSERT(!xfs_ioend_is_append(ioend));
        }

done:
        if (error)
                ioend->io_error = error;
        xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, so that an I/O completion
         * callback that fires before we have started all the I/O
         * cannot call the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_inode = inode;
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        ioend->io_offset = 0;
        ioend->io_size = 0;
        ioend->io_append_trans = NULL;

        INIT_WORK(&ioend->io_work, xfs_end_io);
        return ioend;
}

STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        struct xfs_bmbt_irec    *imap,
        int                     type)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 count = 1 << inode->i_blkbits;
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     error = 0;
        int                     bmapi_flags = XFS_BMAPI_ENTIRE;
        int                     nimaps = 1;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (type == XFS_IO_UNWRITTEN)
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);

        if (offset + count > mp->m_super->s_maxbytes)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
                                imap, &nimaps, bmapi_flags);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (error)
                return error;

        if (type == XFS_IO_DELALLOC &&
            (!nimaps || isnullstartblock(imap->br_startblock))) {
                error = xfs_iomap_write_allocate(ip, offset, imap);
                if (!error)
                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
                return error;
        }

#ifdef DEBUG
        if (type == XFS_IO_UNWRITTEN) {
                ASSERT(nimaps);
                ASSERT(imap->br_startblock != HOLESTARTBLOCK);
                ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
        }
#endif
        if (nimaps)
                trace_xfs_map_blocks_found(ip, offset, count, type, imap);
        return 0;
}
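
/*
 * Behaviour sketch by IO type: for XFS_IO_UNWRITTEN the lookup ignores
 * the unwritten state (XFS_BMAPI_IGSTATE) so the mapping is returned
 * as-is; for XFS_IO_DELALLOC, a missing or delayed-allocation mapping
 * triggers real block allocation via xfs_iomap_write_allocate(); for
 * XFS_IO_OVERWRITE the existing mapping is simply read back.
 */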

STATIC int
xfs_imap_valid(
        struct inode            *inode,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        offset >>= inode->i_blkbits;

        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
}
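
/*
 * Worked example, assuming 4k blocks (i_blkbits == 12): an imap with
 * br_startoff == 100 and br_blockcount == 8 covers file blocks
 * [100, 108), i.e. byte offsets [409600, 442368), so offset 441344
 * (block 107) is valid while 442368 (block 108) is not.
 */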

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
        struct bio              *bio)
{
        xfs_ioend_t             *ioend = bio->bi_private;

        if (!ioend->io_error)
                ioend->io_error = bio->bi_error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
        struct writeback_control *wbc,
        xfs_ioend_t             *ioend,
        struct bio              *bio)
{
        atomic_inc(&ioend->io_remaining);
        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;
        submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        struct bio              *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

        ASSERT(bio->bi_private == NULL);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        return bio;
}

STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page             *page,
        int                     clear_dirty,
        int                     buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));

        /*
         * if the page was not fully cleaned, we need to ensure that the higher
         * layers come back to it correctly. That means we need to keep the page
         * dirty, and for WB_SYNC_ALL writeback we need to ensure the
         * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
         * write this page in this writeback sweep will be made.
         */
        if (clear_dirty) {
                clear_page_dirty_for_io(page);
                set_page_writeback(page);
        } else
                set_page_writeback_keepwrite(page);

        unlock_page(page);

        /* If no buffers on the page are to be written, finish it here */
        if (!buffers)
                end_page_writeback(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion can occur on those before we
 * mark the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
        struct writeback_control *wbc,
        xfs_ioend_t             *ioend,
        int                     fail)
{
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
                        xfs_start_buffer_writeback(bh);
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

                /*
                 * If we are failing the IO now, just mark the ioend with an
                 * error and finish it. This will run IO completion immediately
                 * as there is only one reference to the ioend at this point in
                 * time.
                 */
                if (fail) {
                        ioend->io_error = fail;
                        xfs_finish_ioend(ioend);
                        continue;
                }

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(wbc, ioend, bio);
                                goto retry;
                        }

                        if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(wbc, ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(wbc, ioend, bio);
                xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
}
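
/*
 * Sketch of the bio batching in pass 2 above: buffers whose b_blocknr
 * values are consecutive (lastblock + 1) are merged into one bio, so an
 * ioend covering blocks 100, 101, 102, 200 is submitted as two bios -
 * one for 100-102 and one for 200.  A full bio (bio_add_page()
 * returning less than bh->b_size) forces the same split via the retry
 * path.
 */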

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        /*
                         * The unwritten flag is cleared when added to the
                         * ioend. We're not submitting for I/O so mark the
                         * buffer unwritten again for next time around.
                         */
                        if (ioend->io_type == XFS_IO_UNWRITTEN)
                                set_buffer_unwritten(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * A newly started ioend is chained onto the previous one via its
 * io_list pointer.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
{
        xfs_ioend_t             *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}
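
/*
 * Chaining sketch: a page whose buffers run delalloc, delalloc,
 * unwritten produces two ioends - the type change forces a new one -
 * and the first ioend's io_list points at the second.  The buffers
 * within an ioend are singly linked through b_private, which is why
 * b_private is cleared on the newly appended tail above.
 */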

STATIC void
xfs_map_buffer(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        sector_t                bn;
        struct xfs_mount        *m = XFS_I(inode)->i_mount;
        xfs_off_t               iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
        xfs_daddr_t             iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
              ((offset - iomap_offset) >> inode->i_blkbits);

        ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}
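
/*
 * Worked example of the block number math above, assuming 4k blocks
 * (i_blkbits == 12, BBSHIFT == 9): a mapping whose br_startblock
 * translates to iomap_bn == 800 basic (512-byte) blocks starts at
 * filesystem block 800 >> 3 == 100.  For a buffer 8192 bytes past
 * iomap_offset the second term adds 2, giving b_blocknr == 102.
 */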

STATIC void
xfs_map_at_offset(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        xfs_map_buffer(inode, bh, imap, offset);
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, only the first
 * buffer on the page is checked for a match.
 */
STATIC bool
xfs_check_page_type(
        struct page             *page,
        unsigned int            type,
        bool                    check_all_buffers)
{
        struct buffer_head      *bh;
        struct buffer_head      *head;

        if (PageWriteback(page))
                return false;
        if (!page->mapping)
                return false;
        if (!page_has_buffers(page))
                return false;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh)) {
                        if (type == XFS_IO_UNWRITTEN)
                                return true;
                } else if (buffer_delay(bh)) {
                        if (type == XFS_IO_DELALLOC)
                                return true;
                } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
                        if (type == XFS_IO_OVERWRITE)
                                return true;
                }

                /* If we are only checking the first buffer, we are done now. */
                if (!check_all_buffers)
                        break;
        } while ((bh = bh->b_this_page) != head);

        return false;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc)
{
        struct buffer_head      *bh, *head;
        xfs_off_t               end_offset;
        unsigned long           p_offset;
        unsigned int            type;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
        xfs_off_t               offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (!trylock_page(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        /*
         * If the current map does not span the entire page we are about to try
         * to write, then give up. The only way we can write a page that spans
         * multiple mappings in a single writeback iteration is via the
         * xfs_vm_writepage() function. Data integrity writeback requires the
         * entire page to be written in a single attempt, otherwise the part of
         * the page we don't write here doesn't get written as part of the data
         * integrity sync.
         *
         * For normal writeback, we also don't attempt to write partial pages
         * here as it simply means that write_cache_pages() will see it under
         * writeback and ignore the page until some point in the future, at
         * which time this will be the only page in the file that needs
         * writeback.  Hence for more optimal IO patterns, we should always
         * avoid partial page writeback due to multiple mappings on a page here.
         */
        if (!xfs_imap_valid(inode, imap, end_offset))
                goto fail_unlock_page;

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        /*
         * The moment we find a buffer that doesn't match our current type
         * specification or can't be written, abort the loop and start
         * writeback. As per the above xfs_imap_valid() check, only
         * xfs_vm_writepage() can handle partial page writeback fully - we are
         * limited here to the buffers that are contiguous with the current
         * ioend, and hence a buffer we can't write breaks that contiguity and
         * we have to defer the rest of the IO to xfs_vm_writepage().
         */
        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        break;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    buffer_mapped(bh)) {
                        if (buffer_unwritten(bh))
                                type = XFS_IO_UNWRITTEN;
                        else if (buffer_delay(bh))
                                type = XFS_IO_DELALLOC;
                        else
                                type = XFS_IO_OVERWRITE;

                        /*
                         * imap should always be valid because of the above
                         * partial page end_offset check on the imap.
                         */
                        ASSERT(xfs_imap_valid(inode, imap, offset));

                        lock_buffer(bh);
                        if (type != XFS_IO_OVERWRITE)
                                xfs_map_at_offset(inode, bh, imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, type,
                                         ioendp, done);

                        page_dirty--;
                        count++;
                } else {
                        done = 1;
                        break;
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (count) {
                if (--wbc->nr_to_write <= 0 &&
                    wbc->sync_mode == WB_SYNC_NONE)
                        done = 1;
        }
        xfs_start_page_writeback(page, !page_dirty, count);

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}
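
/*
 * Worked example of the page_dirty derivation above, assuming 4k pages
 * and 1k blocks: on an interior page end_offset is page aligned, so
 * p_offset becomes PAGE_CACHE_SIZE and page_dirty == 4 (all buffers).
 * On the last page of a 5120-byte file, end_offset & (PAGE_CACHE_SIZE
 * - 1) == 1024, which rounds up to 1024 and gives page_dirty == 1 -
 * only the single buffer before EOF counts.
 */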

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        pgoff_t                 tlast)
{
        struct pagevec          pvec;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        imap, ioendp, wbc);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned int            offset,
        unsigned int            length)
{
        trace_xfs_invalidatepage(page->mapping->host, page, offset,
                                 length);
        block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
        struct page             *page)
{
        struct inode            *inode = page->mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct buffer_head      *bh, *head;
        loff_t                  offset = page_offset(page);

        if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
                goto out_invalidate;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                goto out_invalidate;

        xfs_alert(ip->i_mount,
                "page discard on page %p, inode 0x%llx, offset %llu.",
                        page, ip->i_ino, offset);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        bh = head = page_buffers(page);
        do {
                int             error;
                xfs_fileoff_t   start_fsb;

                if (!buffer_delay(bh))
                        goto next_buffer;

                start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "page discard unable to remove delalloc mapping.");
                        }
                        break;
                }
next_buffer:
                offset += 1 << inode->i_blkbits;

        } while ((bh = bh->b_this_page) != head);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
        xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
        return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        struct inode            *inode = page->mapping->host;
        struct buffer_head      *bh, *head;
        struct xfs_bmbt_irec    imap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index;
        ssize_t                 len;
        int                     err, imap_valid = 0, uptodate = 1;
        int                     count = 0;

        trace_xfs_writepage(inode, page, 0, 0);

        ASSERT(page_has_buffers(page));

        /*
         * Refuse to write the page out if we are called from reclaim context.
         *
         * This avoids stack overflows when called from deeply used stacks in
         * random callers for direct reclaim or memcg reclaim.  We explicitly
         * allow reclaim from kswapd as the stack usage there is relatively low.
         *
         * This should never happen except in the case of a VM regression so
         * warn about it.
         */
        if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
                        PF_MEMALLOC))
                goto redirty;

        /*
         * Given that we do not allow direct reclaim to call us, we should
         * never be called while in a filesystem transaction.
         */
        if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
                goto redirty;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

        /*
         * The page index is less than the end_index, adjust the end_offset
         * to the highest offset that this page should represent.
         * -----------------------------------------------------
         * |                    file mapping            | <EOF> |
         * -----------------------------------------------------
         * | Page ... | Page N-2 | Page N-1 |  Page N   |       |
         * ^--------------------------------^-----------|--------
         * |     desired writeback range    |       see else    |
         * ---------------------------------^-------------------|
         */
        if (page->index < end_index)
                end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
        else {
                /*
                 * Check whether the page to write out is beyond or straddles
                 * i_size or not.
                 * -------------------------------------------------------
                 * |                file mapping                | <EOF>  |
                 * -------------------------------------------------------
                 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
                 * ^--------------------------------^-----------|---------
                 * |                                | Straddles |
                 * ---------------------------------^-----------|--------|
                 */
                unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

                /*
                 * Skip the page if it is fully outside i_size, e.g. due to a
                 * truncate operation that is in progress. We must redirty the
                 * page so that reclaim stops reclaiming it. Otherwise
                 * xfs_vm_releasepage() is called on it and gets confused.
                 *
                 * Note that the end_index is unsigned long.  It would overflow
                 * if the given offset is greater than 16TB on a 32-bit system
                 * and if we do check the page is fully outside i_size or not
                 * via "if (page->index >= end_index + 1)", as "end_index + 1"
                 * will be evaluated to 0.  Hence this page will be redirtied
                 * and written out repeatedly, which would result in an
                 * infinite loop; the user program that performs this operation
                 * will hang.  Instead, we can verify this situation by checking
                 * if the page to write is totally beyond the i_size or if its
                 * offset is just equal to the EOF.
                 */
                if (page->index > end_index ||
                    (page->index == end_index && offset_into_page == 0))
                        goto redirty;

                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining
                 * memory is zeroed when mapped, and writes to that region are
                 * not written out to the file."
                 */
                zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);

                /* Adjust the end_offset to the end of file */
                end_offset = offset;
        }

        len = 1 << inode->i_blkbits;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        type = XFS_IO_OVERWRITE;

        do {
                int new_ioend = 0;

                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;

                /*
                 * set_page_dirty dirties all buffers in a page, independent
                 * of their state.  The dirty state however is entirely
                 * meaningless for holes (!mapped && uptodate), so skip
                 * buffers covering holes here.
                 */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
                        imap_valid = 0;
                        continue;
                }

                if (buffer_unwritten(bh)) {
                        if (type != XFS_IO_UNWRITTEN) {
                                type = XFS_IO_UNWRITTEN;
                                imap_valid = 0;
                        }
                } else if (buffer_delay(bh)) {
                        if (type != XFS_IO_DELALLOC) {
                                type = XFS_IO_DELALLOC;
                                imap_valid = 0;
                        }
                } else if (buffer_uptodate(bh)) {
                        if (type != XFS_IO_OVERWRITE) {
                                type = XFS_IO_OVERWRITE;
                                imap_valid = 0;
                        }
                } else {
                        if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
                        /*
                         * This buffer is not uptodate and will not be
                         * written to disk.  Ensure that we will put any
                         * subsequent writeable buffers into a new
                         * ioend.
                         */
                        imap_valid = 0;
                        continue;
                }

                if (imap_valid)
                        imap_valid = xfs_imap_valid(inode, &imap, offset);
                if (!imap_valid) {
                        /*
                         * If we didn't have a valid mapping then we need to
                         * put the new mapping into a separate ioend structure.
                         * This ensures non-contiguous extents always have
                         * separate ioends, which is particularly important
                         * for unwritten extent conversion at I/O completion
                         * time.
                         */
                        new_ioend = 1;
                        err = xfs_map_blocks(inode, offset, &imap, type);
                        if (err)
                                goto error;
                        imap_valid = xfs_imap_valid(inode, &imap, offset);
                }
                if (imap_valid) {
                        lock_buffer(bh);
                        if (type != XFS_IO_OVERWRITE)
                                xfs_map_at_offset(inode, bh, &imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, type, &ioend,
                                         new_ioend);
                        count++;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        xfs_start_page_writeback(page, 1, count);

        /* if there is no IO to be submitted for this page, we are done */
        if (!ioend)
                return 0;

        ASSERT(iohead);

        /*
         * Any errors from this point onwards need to be reported through the
         * IO completion path as we have marked the initial page as under
         * writeback and unlocked it.
         */
        if (imap_valid) {
                xfs_off_t               end_index;

                end_index = imap.br_startoff + imap.br_blockcount;

                /* to bytes */
                end_index <<= inode->i_blkbits;

                /* to pages */
                end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

                /* check against file size */
                if (end_index > last_index)
                        end_index = last_index;

                xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
                                  wbc, end_index);
        }

        /*
         * Reserve log space if we might write beyond the on-disk inode size.
         */
        err = 0;
        if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
                err = xfs_setfilesize_trans_alloc(ioend);

        xfs_submit_ioend(wbc, iohead, err);

        return 0;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        xfs_aops_discard_page(page);
        ClearPageUptodate(page);
        unlock_page(page);
        return err;

redirty:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * When we map a DIO buffer, we may need to attach an ioend that describes the
 * type of write IO we are doing. This passes to the completion function the
 * operations it needs to perform. If the mapping is for an overwrite wholly
 * within the EOF then we don't need an ioend and so we don't allocate one.
 * This avoids the unnecessary overhead of allocating and freeing ioends for
 * workloads that don't require transactions on IO completion.
 *
 * If we get multiple mappings in a single IO, we might be mapping different
 * types. But because the direct IO can only have a single private pointer, we
 * need to ensure that:
 *
 * a) i) the ioend spans the entire region of unwritten mappings; or
 *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
 * b) if it contains unwritten extents, it is *permanently* marked as such
 *
 * We could do this by chaining ioends like buffered IO does, but we only
 * actually get one IO completion callback from the direct IO, and that spans
 * the entire IO regardless of how many mappings and IOs are needed to complete
 * the DIO. There is only going to be one reference to the ioend and its life
 * cycle is constrained by the DIO completion code. Hence we don't need
 * reference counting here.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable. Hence if we see this overflow, we have to assume that the IO is
 * extending the file size. We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 *
 * For DAX page faults, we are preparing to never see unwritten extents here,
 * nor should we ever extend the inode size. Hence we will soon have nothing to
 * do here for this case, ensuring we don't have to provide an IO completion
 * callback to free an ioend that we don't actually need for a fault into the
 * page at offset (2^63 - 1FSB) bytes.
 */

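/*
 * For illustration (hypothetical numbers, not from the original source):
 * with a 4k block size, the highest supported file block starts at byte
 * offset 2^63 - 4096. A 4096 byte direct write at that offset gives
 * offset + size == 2^63, which wraps negative in a signed 64 bit type,
 * so the "offset + size < 0" test in xfs_map_direct() below treats such
 * an IO as potentially size-extending and allocates an ioend for it.
 */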
static void
xfs_map_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	bool			dax_fault)
{
	struct xfs_ioend	*ioend;
	xfs_off_t		size = bh_result->b_size;
	int			type;

	if (ISUNWRITTEN(imap))
		type = XFS_IO_UNWRITTEN;
	else
		type = XFS_IO_OVERWRITE;

	trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);

	if (dax_fault) {
		ASSERT(type == XFS_IO_OVERWRITE);
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
					    imap);
		return;
	}

	if (bh_result->b_private) {
		ioend = bh_result->b_private;
		ASSERT(ioend->io_size > 0);
		ASSERT(offset >= ioend->io_offset);
		if (offset + size > ioend->io_offset + ioend->io_size)
			ioend->io_size = offset - ioend->io_offset + size;

		if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
			ioend->io_type = XFS_IO_UNWRITTEN;

		trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
					      ioend->io_size, ioend->io_type,
					      imap);
	} else if (type == XFS_IO_UNWRITTEN ||
		   offset + size > i_size_read(inode) ||
		   offset + size < 0) {
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_size = size;

		bh_result->b_private = ioend;
		set_buffer_defer_completion(bh_result);

		trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
					   imap);
	} else {
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
					    imap);
	}
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
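/*
 * A worked example with made-up numbers: assume a 4k block size,
 * i_size = 10000, and a mapping that could cover bytes [8192, 24576).
 * The request starts inside EOF (8192 < 10000) and would extend past it,
 * so mapping_size is trimmed to roundup_64(10000 - 8192, 4096) == 4096:
 * the caller gets only the block that spans EOF, and blocks beyond EOF
 * are mapped by a later call that can mark them new.
 */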
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	bool			direct,
	bool			dax_fault)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       &imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	/* for DAX, we convert unwritten extents directly */
	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK) ||
	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * xfs_iomap_write_direct() expects the shared lock. It
			 * is unlocked on return.
			 */
			if (lockmode == XFS_ILOCK_EXCL)
				xfs_ilock_demote(ip, lockmode);

			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return error;
			new = 1;

		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here. If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new so
			 * that we know that it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}
		trace_xfs_get_blocks_alloc(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_DELALLOC, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (IS_DAX(inode) && create) {
		ASSERT(!ISUNWRITTEN(&imap));
		/* zeroing is not needed at a higher layer */
		new = 0;
	}

	/* trim mapping down to size requested */
	if (direct || size > (1 << inode->i_blkbits))
		xfs_map_trim_size(inode, iblock, bh_result,
				  &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    (create || !ISUNWRITTEN(&imap))) {
		xfs_map_buffer(inode, bh_result, &imap, offset);
		if (ISUNWRITTEN(&imap))
			set_buffer_unwritten(bh_result);
		/* direct IO needs special help */
		if (create && direct)
			xfs_map_direct(inode, bh_result, &imap, offset,
				       dax_fault);
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}

int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}

int
xfs_get_blocks_dax_fault(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
}
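
/*
 * Sketch (added for illustration, assuming the standard get_block_t calling
 * convention): all three wrappers above match the get_block_t prototype,
 * with the caller passing the requested mapping length via
 * bh_result->b_size, e.g.
 *
 *	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
 *
 *	error = xfs_get_blocks(inode, iblock, &bh, 0);
 *
 * This is how generic code such as mpage_readpage() and
 * __blockdev_direct_IO() (both used later in this file) probe mappings
 * one request at a time.
 */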

static void
__xfs_end_io_direct_write(
	struct inode		*inode,
	struct xfs_ioend	*ioend,
	loff_t			offset,
	ssize_t			size)
{
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
		goto out_end_io;

	/*
	 * dio completion end_io functions are only called on writes if more
	 * than 0 bytes were written.
	 */
	ASSERT(size > 0);

	/*
	 * The ioend only maps whole blocks, while the IO may be sector aligned.
	 * Hence the ioend offset/size may not match the IO offset/size exactly.
	 * Because we don't map overwrites within EOF into the ioend, the offset
	 * may not match, but only if the endio spans EOF. Either way, write
	 * the IO sizes into the ioend so that completion processing does the
	 * right thing.
	 */
	ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
	ioend->io_size = size;
	ioend->io_offset = offset;

	/*
	 * The ioend tells us whether we are doing unwritten extent conversion
	 * or an append transaction that updates the on-disk file size. These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 *
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&XFS_I(inode)->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&XFS_I(inode)->i_flags_lock);

	/*
	 * If we are doing an append IO that needs to update the EOF on disk,
	 * do the transaction reserve now so we can use common end io
	 * processing. Stashing the error (if there is one) in the ioend will
	 * result in the ioend processing passing on the error if it is
	 * possible as we can't return it from here.
	 */
	if (ioend->io_type == XFS_IO_OVERWRITE)
		ioend->io_error = xfs_setfilesize_trans_alloc(ioend);

out_end_io:
	xfs_end_io(&ioend->io_work);
	return;
}

/*
 * Complete a direct I/O write request.
 *
 * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
 * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
 * wholly within the EOF and so there is nothing for us to do. Note that in this
 * case the completion can be called in interrupt context, whereas if we have an
 * ioend we will always be called in task context (i.e. from a workqueue).
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_ioend	*ioend = private;

	trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
				     ioend ? ioend->io_type : 0, NULL);

	if (!ioend) {
		ASSERT(offset + size <= i_size_read(inode));
		return;
	}

	__xfs_end_io_direct_write(inode, ioend, offset, size);
}

static inline ssize_t
xfs_vm_do_dio(
	struct inode		*inode,
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset,
	void			(*endio)(struct kiocb	*iocb,
					 loff_t		offset,
					 ssize_t	size,
					 void		*private),
	int			flags)
{
	struct block_device	*bdev;

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset,
				 xfs_get_blocks_direct, endio, 0);

	bdev = xfs_find_bdev_for_inode(inode);
	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
				    xfs_get_blocks_direct, endio, NULL, flags);
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;

	if (iov_iter_rw(iter) == WRITE)
		return xfs_vm_do_dio(inode, iocb, iter, offset,
				     xfs_end_io_direct_write,
				     DIO_ASYNC_EXTEND);
	return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					      end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
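
/*
 * Example with made-up numbers (assuming XFS_B_TO_FSB() rounds byte offsets
 * up to whole filesystem blocks): with 4k blocks, a failed write covering
 * bytes [4096, 12288) gives start_fsb == 1 and end_fsb == 3, so delalloc
 * blocks 1 and 2 are punched out above; a sub-block range such as
 * [4096, 6000) rounds to start_fsb == 1, end_fsb == 2 and punches just
 * block 1, while a range that rounds to end_fsb <= start_fsb punches
 * nothing at all.
 */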

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on 64-bit platform.  However, for 64-bit pos request on 32-bit
	 * platform, the high 32-bit will be masked off if we evaluate the
	 * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
	 * 0xfffff000 as an unsigned long, hence the result is incorrect
	 * which could cause the following ASSERT to fail in most cases.
	 * In order to avoid this, we can evaluate the block_offset of the
	 * start of the page by using shifts rather than masks, which
	 * sidesteps the mismatch problem.
	 */
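	/*
	 * Concrete illustration (hypothetical values): on a 32-bit kernel
	 * with 4k pages, PAGE_MASK is the unsigned long 0xfffff000, so for
	 * pos = 0x100001000 (4GiB + 4k) the masked form (pos & PAGE_MASK)
	 * yields 0x1000 and loses the high word, while
	 * (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT preserves the full
	 * 64-bit value 0x100001000.
	 */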
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/*
		 * This buffer does not contain data anymore. Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks out
 * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
 * the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);
		}

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
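
/*
 * Worked example for the failure path above (made-up numbers): with
 * i_size = 8000, a failed write at pos = 10000 of len = 3000 punches
 * delalloc blocks via xfs_vm_write_failed() and then truncates the page
 * cache over [max(pos, isize), pos + len) == [10000, 13000), leaving
 * blocks from earlier successful writes below EOF untouched.
 */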

/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written. Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed. For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			/* only kill blocks in this write beyond EOF */
			if (pos > isize)
				isize = pos;
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
1920
Dave Chinner22e757a2014-09-02 12:12:51 +10001921/*
1922 * This is basically a copy of __set_page_dirty_buffers() with one
1923 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
1924 * dirty, we'll never be able to clean them because we don't write buffers
1925 * beyond EOF, and that means we can't invalidate pages that span EOF
1926 * that have been marked dirty. Further, the dirty state can leak into
1927 * the file interior if the file is extended, resulting in all sorts of
1928 * bad things happening as the state does not match the underlying data.
1929 *
1930 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
1931 * this only exist because of bufferheads and how the generic code manages them.
1932 */
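/*
 * For example (illustrative numbers): on a 4k page with 1k blocks and EOF
 * falling 1536 bytes into the page, only the buffers starting at page
 * offsets 0 and 1024 lie below EOF and get dirtied by the loop below; the
 * buffers at 2048 and 3072 are left clean so they can never leak stale
 * dirty state beyond EOF.
 */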
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;
	struct mem_cgroup	*memcg;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
	 * per-memcg dirty page counters.
	 */
	memcg = mem_cgroup_begin_page_stat(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping, memcg);
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page),
					   PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	mem_cgroup_end_page_stat(memcg);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
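
/*
 * Usage note (added for context, not part of the original file): XFS wires
 * this table up when it initializes a new VFS inode, along the lines of
 *
 *	inode->i_mapping->a_ops = &xfs_address_space_operations;
 *
 * so the generic page cache and direct IO paths dispatch into the xfs_vm_*
 * implementations above.
 */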