/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Clear the specified ranges to zero through either the pagecache or DAX.
 * Holes and unwritten extents will be left as-is as they already are zeroed.
 */
int
xfs_zero_range(
        struct xfs_inode        *ip,
        xfs_off_t               pos,
        xfs_off_t               count,
        bool                    *did_zero)
{
        return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
}

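/*
 * Set or clear the preallocation flag on the inode inside a fresh
 * transaction.  Unless XFS_PREALLOC_INVISIBLE is set, this also strips the
 * setuid/setgid bits and bumps the timestamps, mirroring what an ordinary
 * write would do.  XFS_PREALLOC_SYNC makes the commit synchronous.
 */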
int
xfs_update_prealloc_flags(
        struct xfs_inode        *ip,
        enum xfs_prealloc_flags flags)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
                        0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

        if (!(flags & XFS_PREALLOC_INVISIBLE)) {
                VFS_I(ip)->i_mode &= ~S_ISUID;
                if (VFS_I(ip)->i_mode & S_IXGRP)
                        VFS_I(ip)->i_mode &= ~S_ISGID;
                xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }

        if (flags & XFS_PREALLOC_SET)
                ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
        if (flags & XFS_PREALLOC_CLEAR)
                ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        if (flags & XFS_PREALLOC_SYNC)
                xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_lsn_t               lsn = 0;

        trace_xfs_dir_fsync(ip);

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip))
                lsn = ip->i_itemp->ili_last_lsn;
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!lsn)
                return 0;
        return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error = 0;
        int                     log_flushed = 0;
        xfs_lsn_t               lsn = 0;

        trace_xfs_file_fsync(ip);

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        /*
         * If we have an RT and/or log subvolume we need to make sure to flush
         * the write cache of the device used for file data first.  This is to
         * ensure newly written file data makes it to disk before logging the
         * new inode size in case of an extending write.
         */
        if (XFS_IS_REALTIME_INODE(ip))
                xfs_blkdev_issue_flush(mp->m_rtdev_targp);
        else if (mp->m_logdev_targp != mp->m_ddev_targp)
                xfs_blkdev_issue_flush(mp->m_ddev_targp);

        /*
         * All metadata updates are logged, which means that we just have to
         * flush the log up to the latest LSN that touched the inode. If we have
         * concurrent fsync/fdatasync() calls, we need them to all block on the
         * log force before we clear the ili_fsync_fields field. This ensures
         * that we don't get a racing sync operation that does not wait for the
         * metadata to hit the journal before returning. If we race with
         * clearing the ili_fsync_fields, then all that will happen is the log
         * force will do nothing as the lsn will already be on disk. We can't
         * race with setting ili_fsync_fields because that is done under
         * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
         * until after the ili_fsync_fields is cleared.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip)) {
                if (!datasync ||
                    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
                        lsn = ip->i_itemp->ili_last_lsn;
        }

        if (lsn) {
                error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
                ip->i_itemp->ili_fsync_fields = 0;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /*
         * If we only have a single device, and the log force above was
         * a no-op we might have to flush the data device cache here.
         * This can only happen for fdatasync/O_DSYNC if we were overwriting
         * an already allocated file and thus do not have any metadata to
         * commit.
         */
        if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
            mp->m_logdev_targp == mp->m_ddev_targp)
                xfs_blkdev_issue_flush(mp->m_ddev_targp);

        return error;
}

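/*
 * Direct I/O reads: take the iolock shared (direct reads can run
 * concurrently with each other and with direct writes) and hand the request
 * to the iomap direct I/O code.
 */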
STATIC ssize_t
xfs_file_dio_aio_read(
        struct kiocb            *iocb,
        struct iov_iter         *to)
{
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
        size_t                  count = iov_iter_count(to);
        ssize_t                 ret;

        trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

        if (!count)
                return 0; /* skip atime */

        file_accessed(iocb->ki_filp);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        return ret;
}

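/*
 * DAX reads copy directly from the backing store under the shared iolock;
 * there is no page cache involved, so atime is updated by hand afterwards.
 */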
static noinline ssize_t
xfs_file_dax_read(
        struct kiocb            *iocb,
        struct iov_iter         *to)
{
        struct xfs_inode        *ip = XFS_I(iocb->ki_filp->f_mapping->host);
        size_t                  count = iov_iter_count(to);
        ssize_t                 ret = 0;

        trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

        if (!count)
                return 0; /* skip atime */

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        file_accessed(iocb->ki_filp);
        return ret;
}

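/*
 * Buffered reads go through the generic page cache read path, wrapped in
 * the shared iolock, which provides read/write atomicity against buffered
 * writes (those take the iolock exclusive).
 */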
STATIC ssize_t
xfs_file_buffered_aio_read(
        struct kiocb            *iocb,
        struct iov_iter         *to)
{
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
        ssize_t                 ret;

        trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        ret = generic_file_read_iter(iocb, to);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        return ret;
}

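/*
 * ->read_iter: dispatch to the DAX, direct or buffered read path, in that
 * order of precedence, and account the bytes read.
 */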
STATIC ssize_t
xfs_file_read_iter(
        struct kiocb            *iocb,
        struct iov_iter         *to)
{
        struct inode            *inode = file_inode(iocb->ki_filp);
        struct xfs_mount        *mp = XFS_I(inode)->i_mount;
        ssize_t                 ret = 0;

        XFS_STATS_INC(mp, xs_read_calls);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (IS_DAX(inode))
                ret = xfs_file_dax_read(iocb, to);
        else if (iocb->ki_flags & IOCB_DIRECT)
                ret = xfs_file_dio_aio_read(iocb, to);
        else
                ret = xfs_file_buffered_aio_read(iocb, to);

        if (ret > 0)
                XFS_STATS_ADD(mp, xs_read_bytes, ret);
        return ret;
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int                                     /* error (positive) */
xfs_zero_eof(
        struct xfs_inode        *ip,
        xfs_off_t               offset,         /* starting I/O offset */
        xfs_fsize_t             isize,          /* current inode size */
        bool                    *did_zeroing)
{
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        trace_xfs_zero_eof(ip, isize, offset - isize);
        return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
        struct kiocb            *iocb,
        struct iov_iter         *from,
        int                     *iolock)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 error = 0;
        size_t                  count = iov_iter_count(from);
        bool                    drained_dio = false;

restart:
        error = generic_write_checks(iocb, from);
        if (error <= 0)
                return error;

        error = xfs_break_layouts(inode, iolock);
        if (error)
                return error;

        /*
         * For changing security info in file_remove_privs() we need i_rwsem
         * exclusively.
         */
        if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
                xfs_iunlock(ip, *iolock);
                *iolock = XFS_IOLOCK_EXCL;
                xfs_ilock(ip, *iolock);
                goto restart;
        }
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
         * write.  If zeroing is needed and we are currently holding the
         * iolock shared, we need to update it to exclusive which implies
         * having to redo all checks before.
         *
         * We need to serialise against EOF updates that occur in IO
         * completions here. We want to make sure that nobody is changing the
         * size while we do this check until we have placed an IO barrier (i.e.
         * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
         * The spinlock effectively forms a memory barrier once we have the
         * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
         * and hence be able to correctly determine if we need to run zeroing.
         */
        spin_lock(&ip->i_flags_lock);
        if (iocb->ki_pos > i_size_read(inode)) {
                bool    zero = false;

                spin_unlock(&ip->i_flags_lock);
                if (!drained_dio) {
                        if (*iolock == XFS_IOLOCK_SHARED) {
                                xfs_iunlock(ip, *iolock);
                                *iolock = XFS_IOLOCK_EXCL;
                                xfs_ilock(ip, *iolock);
                                iov_iter_reexpand(from, count);
                        }
                        /*
                         * We now have an IO submission barrier in place, but
                         * AIO can do EOF updates during IO completion and hence
                         * we now need to wait for all of them to drain. Non-AIO
                         * DIO will have drained before we are given the
                         * XFS_IOLOCK_EXCL, and so for most cases this wait is a
                         * no-op.
                         */
                        inode_dio_wait(inode);
                        drained_dio = true;
                        goto restart;
                }
                error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
                if (error)
                        return error;
        } else
                spin_unlock(&ip->i_flags_lock);

        /*
         * Updating the timestamps will grab the ilock again from
         * xfs_fs_dirty_inode, so we have to call it after dropping the
         * lock above.  Eventually we should look into a way to avoid
         * the pointless lock roundtrip.
         */
        if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
                error = file_update_time(file);
                if (error)
                        return error;
        }

        /*
         * If we're writing the file then make sure to clear the setuid and
         * setgid bits if the process is not being run by root.  This keeps
         * people from modifying setuid and setgid binaries.
         */
        if (!IS_NOSEC(inode))
                return file_remove_privs(file);
        return 0;
}

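/*
 * Completion handler for direct writes, called from the iomap dio code:
 * push out the new in-core file size for extending writes, end any
 * copy-on-write remapping, and convert unwritten extents that the write has
 * now filled with data.
 */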
static int
xfs_dio_write_end_io(
        struct kiocb            *iocb,
        ssize_t                 size,
        unsigned                flags)
{
        struct inode            *inode = file_inode(iocb->ki_filp);
        struct xfs_inode        *ip = XFS_I(inode);
        loff_t                  offset = iocb->ki_pos;
        bool                    update_size = false;
        int                     error = 0;

        trace_xfs_end_io_direct_write(ip, offset, size);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        if (size <= 0)
                return size;

        /*
         * We need to update the in-core inode size here so that we don't end up
         * with the on-disk inode size being outside the in-core inode size. We
         * have no other method of updating EOF for AIO, so always do it here
         * if necessary.
         *
         * We need to lock the test/set EOF update as we can be racing with
         * other IO completions here to update the EOF. Failing to serialise
         * here can result in EOF moving backwards and Bad Things Happen when
         * that occurs.
         */
        spin_lock(&ip->i_flags_lock);
        if (offset + size > i_size_read(inode)) {
                i_size_write(inode, offset + size);
                update_size = true;
        }
        spin_unlock(&ip->i_flags_lock);

        if (flags & IOMAP_DIO_COW) {
                error = xfs_reflink_end_cow(ip, offset, size);
                if (error)
                        return error;
        }

        if (flags & IOMAP_DIO_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size);
        else if (update_size)
                error = xfs_setfilesize(ip, offset, size);

        return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
        struct kiocb            *iocb,
        struct iov_iter         *from)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
        int                     unaligned_io = 0;
        int                     iolock;
        size_t                  count = iov_iter_count(from);
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;

        /* DIO must be aligned to device logical sector size */
        if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
                return -EINVAL;

        /*
         * Don't take the exclusive iolock here unless the I/O is unaligned to
         * the file system block size.  We don't need to consider the EOF
         * extension case here because xfs_file_aio_write_checks() will relock
         * the inode as necessary for EOF zeroing cases and fill out the new
         * inode size as appropriate.
         */
        if ((iocb->ki_pos & mp->m_blockmask) ||
            ((iocb->ki_pos + count) & mp->m_blockmask)) {
                unaligned_io = 1;
                iolock = XFS_IOLOCK_EXCL;
        } else {
                iolock = XFS_IOLOCK_SHARED;
        }

        xfs_ilock(ip, iolock);

        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
        count = iov_iter_count(from);

        /*
         * If we are doing unaligned IO, wait for all other IO to drain,
         * otherwise demote the lock if we had to take the exclusive lock
         * for other reasons in xfs_file_aio_write_checks.
         */
        if (unaligned_io)
                inode_dio_wait(inode);
        else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
        }

        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

        /* If this is a block-aligned directio CoW, remap immediately. */
        if (xfs_is_reflink_inode(ip) && !unaligned_io) {
                ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
                if (ret)
                        goto out;
        }

        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
        xfs_iunlock(ip, iolock);

        /*
         * No fallback to buffered IO on errors for XFS, direct IO will either
         * complete fully or fail.
         */
        ASSERT(ret < 0 || ret == count);
        return ret;
}

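/*
 * DAX writes are always serialised under the exclusive iolock and bypass
 * the page cache; an extending write updates the on-disk size via
 * xfs_setfilesize() once the data copy has succeeded.
 */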
static noinline ssize_t
xfs_file_dax_write(
        struct kiocb            *iocb,
        struct iov_iter         *from)
{
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     iolock = XFS_IOLOCK_EXCL;
        ssize_t                 ret, error = 0;
        size_t                  count;
        loff_t                  pos;

        xfs_ilock(ip, iolock);
        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;

        pos = iocb->ki_pos;
        count = iov_iter_count(from);

        trace_xfs_file_dax_write(ip, count, pos);
        ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                error = xfs_setfilesize(ip, pos, ret);
        }
out:
        xfs_iunlock(ip, iolock);
        return error ? error : ret;
}

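/*
 * Buffered writes go through the iomap write path under the exclusive
 * iolock.  On -EDQUOT or -ENOSPC the write is retried after trying to free
 * quota blocks, speculative post-EOF preallocations and CoW reservations
 * that may be pinning the space.
 */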
STATIC ssize_t
xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
        struct iov_iter         *from)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock;

write_retry:
        iolock = XFS_IOLOCK_EXCL;
        xfs_ilock(ip, iolock);

        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);

        trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
        ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
        if (likely(ret >= 0))
                iocb->ki_pos += ret;

        /*
         * If we hit a space limit, try to free up some lingering preallocated
         * space before returning an error. In the case of ENOSPC, first try to
         * write back all dirty inodes to free up some of the excess reserved
         * metadata space. This reduces the chances that the eofblocks scan
         * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
         * also behaves as a filter to prevent too many eofblocks scans from
         * running at the same time.
         */
        if (ret == -EDQUOT && !enospc) {
                xfs_iunlock(ip, iolock);
                enospc = xfs_inode_free_quota_eofblocks(ip);
                if (enospc)
                        goto write_retry;
                enospc = xfs_inode_free_quota_cowblocks(ip);
                if (enospc)
                        goto write_retry;
                iolock = 0;
        } else if (ret == -ENOSPC && !enospc) {
                struct xfs_eofblocks eofb = {0};

                enospc = 1;
                xfs_flush_inodes(ip->i_mount);

                xfs_iunlock(ip, iolock);
                eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
                xfs_icache_free_eofblocks(ip->i_mount, &eofb);
                goto write_retry;
        }

        current->backing_dev_info = NULL;
out:
        if (iolock)
                xfs_iunlock(ip, iolock);
        return ret;
}

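/*
 * ->write_iter: dispatch to the DAX, direct or buffered write path.  A
 * direct write that returns -EREMCHG (a reflink CoW that cannot be done
 * directly) falls back to the buffered path below.
 */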
STATIC ssize_t
xfs_file_write_iter(
        struct kiocb            *iocb,
        struct iov_iter         *from)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        size_t                  ocount = iov_iter_count(from);

        XFS_STATS_INC(ip->i_mount, xs_write_calls);

        if (ocount == 0)
                return 0;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        if (IS_DAX(inode))
                ret = xfs_file_dax_write(iocb, from);
        else if (iocb->ki_flags & IOCB_DIRECT) {
                /*
                 * Allow a directio write to fall back to a buffered
                 * write *only* in the case that we're doing a reflink
                 * CoW.  In all other directio scenarios we do not
                 * allow an operation to fall back to buffered mode.
                 */
                ret = xfs_file_dio_aio_write(iocb, from);
                if (ret == -EREMCHG)
                        goto buffered;
        } else {
buffered:
                ret = xfs_file_buffered_aio_write(iocb, from);
        }

        if (ret > 0) {
                XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

                /* Handle various SYNC-type writes */
                ret = generic_write_sync(iocb, ret);
        }
        return ret;
}

#define XFS_FALLOC_FL_SUPPORTED                                         \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |      \
                 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

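/*
 * fallocate() implementation.  All modes take the iolock and the mmap lock
 * exclusively and break any pNFS layouts before touching the block map; any
 * resulting file size change is applied through xfs_vn_setattr_size().
 */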
STATIC long
xfs_file_fallocate(
        struct file             *file,
        int                     mode,
        loff_t                  offset,
        loff_t                  len)
{
        struct inode            *inode = file_inode(file);
        struct xfs_inode        *ip = XFS_I(inode);
        long                    error;
        enum xfs_prealloc_flags flags = 0;
        uint                    iolock = XFS_IOLOCK_EXCL;
        loff_t                  new_size = 0;
        bool                    do_file_insert = false;

        if (!S_ISREG(inode->i_mode))
                return -EINVAL;
        if (mode & ~XFS_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        xfs_ilock(ip, iolock);
        error = xfs_break_layouts(inode, &iolock);
        if (error)
                goto out_unlock;

        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
        iolock |= XFS_MMAPLOCK_EXCL;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                error = xfs_free_file_space(ip, offset, len);
                if (error)
                        goto out_unlock;
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
                unsigned        blksize_mask = (1 << inode->i_blkbits) - 1;

                if (offset & blksize_mask || len & blksize_mask) {
                        error = -EINVAL;
                        goto out_unlock;
                }

                /*
                 * There is no need to overlap collapse range with EOF,
                 * in which case it is effectively a truncate operation
                 */
                if (offset + len >= i_size_read(inode)) {
                        error = -EINVAL;
                        goto out_unlock;
                }

                new_size = i_size_read(inode) - len;

                error = xfs_collapse_file_space(ip, offset, len);
                if (error)
                        goto out_unlock;
        } else if (mode & FALLOC_FL_INSERT_RANGE) {
                unsigned        blksize_mask = (1 << inode->i_blkbits) - 1;

                new_size = i_size_read(inode) + len;
                if (offset & blksize_mask || len & blksize_mask) {
                        error = -EINVAL;
                        goto out_unlock;
                }

                /* check the new inode size does not wrap through zero */
                if (new_size > inode->i_sb->s_maxbytes) {
                        error = -EFBIG;
                        goto out_unlock;
                }

                /* Offset should be less than i_size */
                if (offset >= i_size_read(inode)) {
                        error = -EINVAL;
                        goto out_unlock;
                }
                do_file_insert = true;
        } else {
                flags |= XFS_PREALLOC_SET;

                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                    offset + len > i_size_read(inode)) {
                        new_size = offset + len;
                        error = inode_newsize_ok(inode, new_size);
                        if (error)
                                goto out_unlock;
                }

                if (mode & FALLOC_FL_ZERO_RANGE)
                        error = xfs_zero_file_space(ip, offset, len);
                else {
                        if (mode & FALLOC_FL_UNSHARE_RANGE) {
                                error = xfs_reflink_unshare(ip, offset, len);
                                if (error)
                                        goto out_unlock;
                        }
                        error = xfs_alloc_file_space(ip, offset, len,
                                                     XFS_BMAPI_PREALLOC);
                }
                if (error)
                        goto out_unlock;
        }

        if (file->f_flags & O_DSYNC)
                flags |= XFS_PREALLOC_SYNC;

        error = xfs_update_prealloc_flags(ip, flags);
        if (error)
                goto out_unlock;

        /* Change file size if needed */
        if (new_size) {
                struct iattr iattr;

                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
                error = xfs_vn_setattr_size(file_dentry(file), &iattr);
                if (error)
                        goto out_unlock;
        }

        /*
         * Perform hole insertion now that the file size has been updated, so
         * that if we crash during the operation we don't leave shifted
         * extents past EOF and hence lose access to the data that is
         * contained within them.
         */
        if (do_file_insert)
                error = xfs_insert_file_space(ip, offset, len);

out_unlock:
        xfs_iunlock(ip, iolock);
        return error;
}

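/*
 * The clone and dedupe file operations are both thin wrappers around
 * xfs_reflink_remap_range(); dedupe only differs in that the source and
 * destination contents must already match, and in returning the deduped
 * length instead of zero on success.
 */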
STATIC int
xfs_file_clone_range(
        struct file     *file_in,
        loff_t          pos_in,
        struct file     *file_out,
        loff_t          pos_out,
        u64             len)
{
        return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
                                       len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
        struct file     *src_file,
        u64             loff,
        u64             len,
        struct file     *dst_file,
        u64             dst_loff)
{
        int             error;

        error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
                                        len, true);
        if (error)
                return error;
        return len;
}

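/*
 * Reject opens of large files when O_LARGEFILE is not set, and any open at
 * all on a shut down filesystem.
 */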
STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *file)
{
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
        return 0;
}

STATIC int
xfs_dir_open(
        struct inode    *inode,
        struct file     *file)
{
        struct xfs_inode *ip = XFS_I(inode);
        int             mode;
        int             error;

        error = xfs_file_open(inode, file);
        if (error)
                return error;

        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * certain to have the next operation be a read there.
         */
        mode = xfs_ilock_data_map_shared(ip);
        if (ip->i_d.di_nextents > 0)
                error = xfs_dir3_data_readahead(ip, 0, -1);
        xfs_iunlock(ip, mode);
        return error;
}

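/*
 * ->release: hand off to xfs_release(), which may trim speculative
 * preallocation beyond EOF.
 */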
STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
        struct file     *file,
        struct dir_context *ctx)
{
        struct inode    *inode = file_inode(file);
        xfs_inode_t     *ip = XFS_I(inode);
        size_t          bufsize;

        /*
         * The Linux API doesn't pass the total size of the buffer we read
         * into down to the filesystem.  With the filldir concept it's not
         * needed for correct information, but the XFS dir2 leaf code wants
         * an estimate of the buffer size to calculate its readahead window
         * and size the buffers used for mapping to physical blocks.
         *
         * Try to give it an estimate that's good enough, maybe at some
         * point we can change the ->readdir prototype to include the
         * buffer size.  For now we use the current glibc buffer size.
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

        return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
        HOLE_OFF = 0,
        DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
        struct page             *page,
        loff_t                  *offset,
        unsigned int            type)
{
        loff_t                  lastoff = page_offset(page);
        bool                    found = false;
        struct buffer_head      *bh, *head;

        bh = head = page_buffers(page);
        do {
                /*
                 * Unwritten extents that have data in the page
                 * cache covering them can be identified by the
                 * BH_Unwritten state flag.  Pages with multiple
                 * buffers might have a mix of holes, data and
                 * unwritten extents - any buffer with valid
                 * data in it should have BH_Uptodate flag set
                 * on it.
                 */
                if (buffer_unwritten(bh) ||
                    buffer_uptodate(bh)) {
                        if (type == DATA_OFF)
                                found = true;
                } else {
                        if (type == HOLE_OFF)
                                found = true;
                }

                if (found) {
                        *offset = lastoff;
                        break;
                }
                lastoff += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
        struct inode            *inode,
        struct xfs_bmbt_irec    *map,
        unsigned int            type,
        loff_t                  *offset)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct pagevec          pvec;
        pgoff_t                 index;
        pgoff_t                 end;
        loff_t                  endoff;
        loff_t                  startoff = *offset;
        loff_t                  lastoff = startoff;
        bool                    found = false;

        pagevec_init(&pvec, 0);

        index = startoff >> PAGE_SHIFT;
        endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
        end = endoff >> PAGE_SHIFT;
        do {
                int             want;
                unsigned        nr_pages;
                unsigned int    i;

                want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          want);
                /*
                 * No page mapped into the given range.  If we are searching
                 * holes and this is the first pass through the loop, the
                 * given offset lands in a hole, so return it.
                 *
                 * If we have already stepped through some block buffers to
                 * find holes but they all contained data, the last offset
                 * has already been updated to point at the end of the last
                 * mapped page; if it has not reached the endpoint of the
                 * search, there should be a hole between them.
                 */
                if (nr_pages == 0) {
                        /* Data search found nothing */
                        if (type == DATA_OFF)
                                break;

                        ASSERT(type == HOLE_OFF);
                        if (lastoff == startoff || lastoff < endoff) {
                                found = true;
                                *offset = lastoff;
                        }
                        break;
                }

                /*
                 * At least we found one page.  If this is the first time we
                 * step into the loop, and if the first page index offset is
                 * greater than the given search offset, a hole was found.
                 */
                if (type == HOLE_OFF && lastoff == startoff &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = true;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page     *page = pvec.pages[i];
                        loff_t          b_offset;

                        /*
                         * At this point, the page may be truncated or
                         * invalidated (changing page->mapping to NULL),
                         * or even swizzled back from swapper_space to tmpfs
                         * file mapping. However, page->index will not change
                         * because we have a reference on the page.
                         *
                         * Searching is done if the page index is out of
                         * range.  If the current offset has not reached the
                         * end of the specified search range, there should be
                         * a hole between them.
                         */
                        if (page->index > end) {
                                if (type == HOLE_OFF && lastoff < endoff) {
                                        *offset = lastoff;
                                        found = true;
                                }
                                goto out;
                        }

                        lock_page(page);
                        /*
                         * Page truncated or invalidated (page->mapping ==
                         * NULL).  We can freely skip it and proceed to check
                         * the next page.
                         */
                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        found = xfs_lookup_buffer_offset(page, &b_offset, type);
                        if (found) {
                                /*
                                 * The found offset may be less than the
                                 * start point of the search if this is the
                                 * first pass through here.
                                 */
                                *offset = max_t(loff_t, startoff, b_offset);
                                unlock_page(page);
                                goto out;
                        }

                        /*
                         * We were either searching for data and found none,
                         * or searching for a hole and found a data buffer.
                         * In either case, the next page probably contains
                         * what we are looking for, so update the last offset
                         * to point at it.
                         */
                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were returned than we wanted, so the search is
                 * done.  In this case, nothing was found when searching for
                 * data, but we found a hole behind the last offset.
                 */
                if (nr_pages < want) {
                        if (type == HOLE_OFF) {
                                *offset = lastoff;
                                found = true;
                        }
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * The caller must lock the inode with xfs_ilock_data_map_shared(); can we
 * craft an appropriate ASSERT?
 *
 * The @end argument exists because the VFS-level lseek interface is defined
 * such that any offset past i_size shall return -ENXIO, but we use this for
 * quota code which does not maintain i_size, and we want to SEEK_DATA past
 * i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated by fsbno up to
	 * the last block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at the given offset, we must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent; try to search the
			 * page cache for a hole or data.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
				      &offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested.  This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to the end
			 * of the file (i.e., there is an implicit hole at the
			 * end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/* If we were looking for data, there is none. */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found; proceed to the next round of the search
		 * if the next read offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If we have found the hole we wanted, the returned offset may be
	 * bigger than the file size, as it may have been aligned to a page
	 * boundary for an unwritten extent.  Clamp it back to @end in that
	 * case.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}
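
/*
 * Illustrative sketch (hypothetical caller, not part of this file): because
 * @end is passed in rather than read from i_size, a caller that does not
 * maintain i_size -- the quota code case mentioned above -- can probe for
 * data past EOF by passing the superblock-wide offset limit as @end.  The
 * helper name is made up; the locking mirrors xfs_seek_hole_data() below.
 */
static loff_t __maybe_unused
xfs_example_seek_data_past_eof(
	struct inode		*inode,
	loff_t			start)
{
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			end = ip->i_mount->m_super->s_maxbytes;
	uint			lock;
	loff_t			offset;

	lock = xfs_ilock_data_map_shared(ip);
	offset = __xfs_seek_hole_data(inode, start, end, SEEK_DATA);
	xfs_iunlock(ip, lock);

	/* Offset of the next data, or -ENXIO if there is none. */
	return offset;
}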

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}
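
/*
 * Userspace illustration (compiled out -- this is not kernel code): how the
 * SEEK_HOLE/SEEK_DATA cases above surface through lseek(2).  An -ENXIO
 * return from xfs_seek_hole_data(), e.g. for SEEK_DATA at or past EOF,
 * shows up as lseek() returning -1 with errno == ENXIO.
 */
#if 0
#define _GNU_SOURCE		/* for SEEK_DATA/SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argv[1], O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;

	data = lseek(fd, 0, SEEK_DATA);	/* first data at or after offset 0 */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at or after offset 0 */
	printf("data: %lld hole: %lld\n", (long long)data, (long long)hole);

	close(fd);
	return 0;
}
#endif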

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!) and
 * unwritten extent mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	} else {
		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode))
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults.  There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function here to handle both cases.  @flags
 * carries the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults.  In reality, it needs to serialise against
 * truncate in the same way as page_mkwrite.  Hence we cycle the
 * XFS_MMAPLOCK_SHARED to ensure the fault barrier is in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
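
/*
 * Userspace illustration (compiled out -- this is not kernel code): a store
 * through a MAP_SHARED writable mapping is what drives the fault handlers
 * wired up above.  The first write to a clean page reaches
 * xfs_filemap_page_mkwrite(); on a DAX inode the fault is routed through
 * the dax_iomap_fault() paths instead of the pagecache.  The file path is a
 * placeholder.
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/file", O_RDWR);	/* hypothetical path */
	char *p;

	if (fd < 0)
		return 1;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	p[0] = 'x';	/* write fault -> ->page_mkwrite / ->fault above */

	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif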

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};