/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

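/*
 * Write the inode's metadata and, for fdatasync, any size changes to disk.
 * The code below forces a synchronous inode-core transaction if the VFS
 * or XFS inode is dirty, forces the log up to the last LSN that touched
 * the inode if it is still pinned, and finally flushes the volatile write
 * cache of the underlying device(s) when write barriers are enabled.
 */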
STATIC int
xfs_file_fsync(
	struct file		*file,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ioend_wait(ip);

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(ip->i_mount,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.
		 */
		if (!log_flushed)
			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

		/*
		 * If this inode is on the RT dev we need to flush that
		 * cache as well.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
	}

	return -error;
}

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

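	/*
	 * For direct reads, take the IO lock exclusive so any cached pages
	 * over the read range can be flushed and invalidated atomically,
	 * then demote it back to shared for the actual read.
	 */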
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	} else
		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

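/*
 * Splice data from the file into a pipe.  The generic helper does the real
 * work; this wrapper only adds the shared IO lock, tracing and statistics.
 */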
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

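/*
 * Update the in-memory inode size after a write.  Writes that extended the
 * file pull ip->i_size forward under the ilock; a failed write (other than
 * -EFAULT) winds *ppos back to the VFS inode size so the file position
 * matches what was actually written.
 */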
STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
		     *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
	struct xfs_inode	*ip)
{
	if (ip->i_new_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);
	xfs_aio_write_newsize_update(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes; only allocated blocks are zeroed.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop.  It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	new_size = *pos + *count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 */
	if (*pos > ip->i_size)
		error = -xfs_zero_eof(ip, *pos, ip->i_size);

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky-to-
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	if (mapping->nrpages) {
		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

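/*
 * Handle buffered writes.  This path takes IOLOCK_EXCL for the whole write
 * and, on ENOSPC, flushes dirty pages to free up reserved delalloc space,
 * then retries the write exactly once before giving up.
 */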
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	size_t			count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

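/*
 * Entry point for ->aio_write.  Validate the iovec, dispatch to the direct
 * or buffered write path, update the in-memory file size, and implement
 * O_DSYNC/O_SYNC semantics by writing back and fsyncing the written range.
 */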
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error, error2;

		xfs_rw_iunlock(ip, iolock);
		error = filemap_write_and_wait_range(mapping, pos, end);
		xfs_rw_ilock(ip, iolock);

		error2 = -xfs_file_fsync(file,
					 (file->f_flags & __O_SYNC) ? 0 : 1);
		if (error)
			ret = error;
		else if (error2)
			ret = error2;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

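/*
 * fallocate() implementation: preallocate (XFS_IOC_RESVSP) or punch out
 * (XFS_IOC_UNRESVSP) the requested range via xfs_change_file_space(), then
 * grow the inode size if the preallocation extended the file and
 * FALLOC_FL_KEEP_SIZE was not requested.
 */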
STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};