/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
/*
 * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
 *
 */

#include "xfs.h"

#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>

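/*
 * A note on the tracing below: ktrace_enter() records sixteen void *
 * slots per entry, so 64-bit quantities (di_size, offset, io_new_size)
 * are split into two 32-bit halves to stay representable when void *
 * is only 32 bits wide.
 */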
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int		tag,
	xfs_iocore_t	*io,
	void		*data,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the supplied buffer,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,		/* inode			*/
	loff_t			pos,		/* offset in file		*/
	size_t			count,		/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set		*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status) {
			goto unlock;
		}

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
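
/*
 * Note that xfs_iozero() returns a positive errno on failure: status is
 * zero or a negative errno from prepare_write()/commit_write(), and the
 * function returns -status.  This matches the "error (positive)"
 * convention of xfs_zero_last_block() and xfs_zero_eof() below.
 */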

ssize_t				/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->pbr_smask) ||
		    (size & target->pbr_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	if (unlikely(ioflags & IO_ISDIRECT))
		down(&inode->i_sem);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_isem;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);

unlock_isem:
	if (unlikely(ioflags & IO_ISDIRECT))
		up(&inode->i_sem);
	return ret;
}

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
					*offset, count,
					FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
			(void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);

	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		isize_fsb_offset;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
	ASSERT(offset > isize);

	mp = io->io_mount;

	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (isize_fsb_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	loff = XFS_FSB_TO_B(mp, last_fsb);
	lsize = XFS_FSB_TO_B(mp, 1);

	zero_offset = isize_fsb_offset;
	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;

	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are skipped: they already read back
 * as zeroes, so only written blocks need zeroing on disk.
 */
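/*
 * A worked example (illustrative only, assuming 4096-byte blocks):
 * growing a file from isize == 6000 to offset == 20000 has
 * xfs_zero_last_block() zero bytes 6000..8191 of block 1, after which
 * last_fsb == 1, start_zero_fsb == 2 and end_zero_fsb == 4, so the loop
 * below examines blocks 2..4 and zeroes whichever of them are allocated
 * and written.
 */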

int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = LINVFS_GET_IP(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	prev_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_extlen_t	prev_zero_count;
	xfs_mount_t	*mp;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));

	mp = io->io_mount;

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	/*
	 * prev_zero_fsb/prev_zero_count track the last range zeroed;
	 * they are maintained below but not otherwise used here.
	 */
	prev_zero_fsb = NULLFILEOFF;
	prev_zero_count = 0;
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * Holes and unwritten extents already read back
			 * as zeroes, so skip past them to the next
			 * written extent in the range.
			 */
			prev_zero_fsb = NULLFILEOFF;
			prev_zero_count = 0;
			start_zero_fsb = imap.br_startoff +
					 imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);

		error = xfs_iozero(ip, loff, lsize, end_size);

		if (error) {
			goto out_lock;
		}

		prev_zero_fsb = start_zero_fsb;
		prev_zero_count = buf_len_fsb;
		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_isem = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_isem = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_isem) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		down(&inode->i_sem);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_isem;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_isem)
			dmflags |= DM_FLAGS_ISEM;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
					pos, count,
					dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		inode_update_time(inode, 1);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do.  First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_isem;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_isem) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			up(&inode->i_sem);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_isem = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_isem = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_isem)
			down(&inode->i_sem);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated the
		 * size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this is a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */

			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

 out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
 out_unlock_isem:
	if (need_isem)
		up(&inode->i_sem);
 out_nounlocks:
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		pagebuf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway.  These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}
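
/*
 * Illustrative sketch (assumes the XFS_BUF_SET_BDSTRAT_FUNC() and
 * XFS_BUF_SET_FSPRIVATE3() accessors from the buffer headers): a
 * metadata buffer would typically be wired up to this callback when
 * it is set up, e.g.:
 *
 *	XFS_BUF_SET_FSPRIVATE3(bp, mp);
 *	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
 *
 * so that the strategy path above can divert the I/O to the error
 * handlers once the filesystem has been forcibly shut down.
 */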


int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */

		pagebuf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
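
/*
 * Illustrative use (hypothetical caller): operations that must be able
 * to write to all of the underlying devices, such as growing the
 * filesystem, can bail out early on read-only media.  Note the positive
 * errno return:
 *
 *	if ((error = xfs_dev_is_read_only(mp, "grow")))
 *		return error;
 */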