/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/* flags for direct write completions */
#define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
#define XFS_DIO_FLAG_APPEND	(1 << 1)

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};
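
/*
 * Walk all the buffers on a page and report whether any of them are in
 * the delalloc or unwritten state.
 */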
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
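
/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the data device for everything else.
 */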
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh;
	unsigned int		off = 0;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	do {
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;
		bh->b_end_io(bh, !error);
next_bh:
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
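
/*
 * Allocate and reserve a transaction for updating the on-disk inode size
 * while we are still in the submission context, and attach it to the ioend
 * for the completion side to commit (or cancel) later.
 */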
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
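
/*
 * Finish the on-disk size update transaction attached to an ioend from I/O
 * completion context, cancelling it instead if the I/O failed.
 */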
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = ioend->io_bio->bi_error;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		error = -EIO;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend, error);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	xfs_destroy_ioend(ioend, error);
}
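
/*
 * Bio completion handler. Unwritten extent conversion and on-disk size
 * updates require a transaction, so they are deferred to a workqueue;
 * everything else can finish the ioend right here in completion context.
 */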
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}
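
/*
 * Look up the extent mapping covering @offset for writeback. Delalloc
 * ranges that have no real blocks behind them yet are allocated here via
 * xfs_iomap_write_allocate().
 */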
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
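
/* Return true if the cached mapping covers the given file offset. */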
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
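
/* Transition a locked, mapped buffer into the async write state. */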
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_error = status;
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
		   ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}
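
/*
 * Allocate an ioend for the given type and offset. The ioend is embedded
 * in the bio itself (io_inline_bio), so a single allocation covers both
 * and the ioend's lifetime is tied to that of its bio.
 */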
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
		   ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one. This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}
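
/*
 * Convert the file offset of a buffer into the corresponding disk block
 * number using the extent mapping, and mark the buffer mapped.
 */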
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
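
/*
 * Attach a disk mapping to a buffer and clear the stale delalloc and
 * unwritten buffer state now that real blocks back this range.
 */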
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
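
/* Trace page invalidation, then hand it to the generic buffer code. */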
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected. While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = 1 << inode->i_blkbits;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (wpc->io_type != XFS_IO_UNWRITTEN) {
				wpc->io_type = XFS_IO_UNWRITTEN;
				wpc->imap_valid = false;
			}
		} else if (buffer_delay(bh)) {
			if (wpc->io_type != XFS_IO_DELALLOC) {
				wpc->io_type = XFS_IO_DELALLOC;
				wpc->imap_valid = false;
			}
		} else if (buffer_uptodate(bh)) {
			if (wpc->io_type != XFS_IO_OVERWRITE) {
				wpc->io_type = XFS_IO_OVERWRITE;
				wpc->imap_valid = false;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					       wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		    | <EOF>    |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    | Straddles |
		 * ---------------------------------^----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
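
/*
 * Write a single page. The writepage context lives on the stack here, so
 * any ioend still cached after xfs_do_writepage() returns must be
 * submitted before we return.
 */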
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
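
/*
 * Write back a range of dirty pages, sharing a single writepage context
 * (and hence a single cached mapping and ioend) across all of them. DAX
 * mappings are flushed through dax_writeback_mapping_range() instead.
 */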
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * When we map a DIO buffer, we may need to pass flags to
 * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable. Hence if we see this overflow, we have to assume that the IO is
 * extending the file size. We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 */
static void
xfs_map_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
	xfs_off_t		size = bh_result->b_size;

	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);

	if (ISUNWRITTEN(imap)) {
		*flags |= XFS_DIO_FLAG_UNWRITTEN;
		set_buffer_defer_completion(bh_result);
	} else if (offset + size > i_size_read(inode) || offset + size < 0) {
		*flags |= XFS_DIO_FLAG_APPEND;
		set_buffer_defer_completion(bh_result);
	}
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
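
/*
 * Underlying get_blocks implementation. New mappings are only created for
 * direct I/O and DAX writes (note the BUG_ON(create && !direct) below);
 * reads just map existing extents and report unwritten extents as holes
 * in the buffered read case.
 */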
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	bool			direct,
	bool			dax_fault)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	BUG_ON(create && !direct);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	/* for DAX, we convert unwritten extents directly */
	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK) ||
	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);

		error = xfs_iomap_write_direct(ip, offset, size,
					       &imap, nimaps);
		if (error)
			return error;
		new = 1;

		trace_xfs_get_blocks_alloc(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_DELALLOC, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (IS_DAX(inode) && create) {
		ASSERT(!ISUNWRITTEN(&imap));
		/* zeroing is not needed at a higher layer */
		new = 0;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    (create || !ISUNWRITTEN(&imap))) {
		xfs_map_buffer(inode, bh_result, &imap, offset);
		if (ISUNWRITTEN(&imap))
			set_buffer_unwritten(bh_result);
		/* direct IO needs special help */
		if (create) {
			if (dax_fault)
				ASSERT(!ISUNWRITTEN(&imap));
			else
				xfs_map_direct(inode, bh_result, &imap, offset);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to by the buffer_head's b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond EOF and we are now
	 * coming back to use it, then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

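/*
 * Unit-conversion sketch for __xfs_get_blocks() (illustrative numbers): with
 * 4k blocks, iblock == 25 gives offset == 25 << 12 == 102400 bytes, and a
 * bh_result->b_size of 16384 bytes yields
 *
 *	offset_fsb = XFS_B_TO_FSBT(mp, 102400)		= 25
 *	end_fsb	   = XFS_B_TO_FSB(mp, 102400 + 16384)	= 29
 *
 * so xfs_bmapi_read() is asked for a 4 block mapping starting at filesystem
 * block 25. XFS_B_TO_FSBT truncates while XFS_B_TO_FSB rounds up, so a
 * partial trailing block is still covered by the requested range.
 */
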
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}

int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}

int
xfs_get_blocks_dax_fault(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
}

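/*
 * Usage sketch (hypothetical caller, not part of this file): the wrappers
 * above follow the get_block_t convention, so a caller reads a mapping
 * roughly like this, setting b_size on entry to the span it wants and
 * finding it trimmed on return to what the extent map allows:
 *
 *	struct buffer_head	bh = { .b_size = 1 << inode->i_blkbits };
 *	int			error;
 *
 *	error = xfs_get_blocks(inode, iblock, &bh, 0);	// 0 == no allocation
 *	if (!error && buffer_mapped(&bh))
 *		// bh.b_blocknr now names the disk block on bh.b_bdev
 *		...;
 */
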
/*
 * Complete a direct I/O write request.
 *
 * xfs_map_direct passes us some flags in the private data to tell us what to
 * do. If no flags are set, then the write IO is an overwrite wholly within
 * the existing allocated file size and so there is nothing for us to do.
 *
 * Note that in this case the completion can be called in interrupt context,
 * whereas if we have flags set we will always be called in task context
 * (i.e. from a workqueue).
 */
STATIC int
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uintptr_t		flags = (uintptr_t)private;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * The flags tell us whether we are doing unwritten extent conversions
	 * or an append transaction that updates the on-disk file size. These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 */
	if (flags == 0) {
		ASSERT(offset + size <= i_size_read(inode));
		return 0;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&ip->i_flags_lock);

	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);

		error = xfs_iomap_write_unwritten(ip, offset, size);
	} else if (flags & XFS_DIO_FLAG_APPEND) {
		struct xfs_trans	*tp;

		trace_xfs_end_io_direct_write_append(ip, offset, size);

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
				&tp);
		if (!error)
			error = xfs_setfilesize(ip, tp, offset, size);
	}

	return error;
}

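/*
 * Decision table for xfs_end_io_direct_write() (a summary of the code above,
 * not new behaviour):
 *
 *	flags == 0			-> overwrite within EOF, nothing to do
 *	flags & XFS_DIO_FLAG_UNWRITTEN	-> convert the range to written extents
 *	flags & XFS_DIO_FLAG_APPEND	-> log an on-disk file size update
 *
 * The flag bits are packed into the opaque private pointer by
 * xfs_map_direct() and recovered here through the uintptr_t cast.
 */
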
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	dio_iodone_t		*endio = NULL;
	int			flags = 0;
	struct block_device	*bdev;

	if (iov_iter_rw(iter) == WRITE) {
		endio = xfs_end_io_direct_write;
		flags = DIO_ASYNC_EXTEND;
	}

	if (IS_DAX(inode)) {
		return dax_do_io(iocb, inode, iter,
				xfs_get_blocks_direct, endio, 0);
	}

	bdev = xfs_find_bdev_for_inode(inode);
	return __blockdev_direct_IO(iocb, inode, bdev, iter,
			xfs_get_blocks_direct, endio, NULL, flags);
}

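/*
 * Path selection in xfs_vm_direct_IO() above: reads keep endio == NULL and
 * flags == 0, while writes get xfs_end_io_direct_write() plus
 * DIO_ASYNC_EXTEND so that AIO which extends the file defers completion to
 * task context. DAX inodes go through dax_do_io(), which copies synchronously
 * to or from persistent memory; everything else is submitted through
 * __blockdev_direct_IO() against the device found by
 * xfs_find_bdev_for_inode().
 */
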
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

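/*
 * Note on xfs_vm_bmap() above: ->bmap is reached from userspace through the
 * FIBMAP ioctl. Flushing with filemap_write_and_wait() first ensures delayed
 * allocations have been converted to real extents, so the block number that
 * generic_block_bmap() reports via xfs_get_blocks() is stable on disk.
 */
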
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

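/*
 * Worked example for xfs_vm_set_page_dirty() (illustrative numbers only):
 * with 1k blocks and 4k pages, a file size of 5120 puts EOF inside the page
 * at index 1 (bytes 4096-8191). Of that page's four buffers, only the one at
 * offset 4096 satisfies offset < end_offset and is dirtied; the buffers at
 * 5120, 6144 and 7168 lie beyond EOF and stay clean, which is exactly the
 * tweak over __set_page_dirty_buffers() described above.
 */
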
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};