 1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 3 * Copyright (c) 2012 Red Hat, Inc.
 4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include "xfs.h"
20#include "xfs_fs.h"
21#include "xfs_format.h"
22#include "xfs_bit.h"
23#include "xfs_log.h"
24#include "xfs_inum.h"
25#include "xfs_trans.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h"
29#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dinode.h"
34#include "xfs_inode.h"
35#include "xfs_btree.h"
36#include "xfs_extfree_item.h"
37#include "xfs_alloc.h"
38#include "xfs_bmap.h"
39#include "xfs_bmap_util.h"
40#include "xfs_rtalloc.h"
41#include "xfs_error.h"
42#include "xfs_quota.h"
43#include "xfs_trans_space.h"
44#include "xfs_trace.h"
 45#include "xfs_icache.h"
 46
47/* Kernel only BMAP related definitions and functions */
48
49/*
50 * Convert the given file system block to a disk block. We have to treat it
51 * differently based on whether the file is a real time file or not, because the
52 * bmap code does.
53 */
54xfs_daddr_t
55xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
56{
57 return (XFS_IS_REALTIME_INODE(ip) ? \
58 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
59 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
60}
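/*
 * Worked example of the mapping above (illustrative numbers, assuming 4k
 * filesystem blocks, i.e. 8 basic blocks per fsblock): for a realtime
 * inode the fsblock is a linear offset into the realtime device, so
 * fsb 100 becomes daddr 100 * 8 = 800 on the rt device.  For an ordinary
 * inode the fsblock encodes (AG number, AG block), so XFS_FSB_TO_DADDR()
 * has to unpack the AG before producing a daddr on the data device.
 * The two encodings are not interchangeable, hence the branch above.
 */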
61
62/*
63 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
64 * caller. Frees all the extents that need freeing, which must be done
65 * last due to locking considerations. We never free any extents in
66 * the first transaction.
67 *
68 * Return 1 if the given transaction was committed and a new one
69 * started, and 0 otherwise in the committed parameter.
70 */
71int /* error */
72xfs_bmap_finish(
73 xfs_trans_t **tp, /* transaction pointer addr */
74 xfs_bmap_free_t *flist, /* i/o: list extents to free */
75 int *committed) /* xact committed or not */
76{
77 xfs_efd_log_item_t *efd; /* extent free data */
78 xfs_efi_log_item_t *efi; /* extent free intention */
79 int error; /* error return value */
80 xfs_bmap_free_item_t *free; /* free extent item */
81 unsigned int logres; /* new log reservation */
82 unsigned int logcount; /* new log count */
83 xfs_mount_t *mp; /* filesystem mount structure */
84 xfs_bmap_free_item_t *next; /* next item on free list */
85 xfs_trans_t *ntp; /* new transaction pointer */
86
87 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
88 if (flist->xbf_count == 0) {
89 *committed = 0;
90 return 0;
91 }
92 ntp = *tp;
93 efi = xfs_trans_get_efi(ntp, flist->xbf_count);
94 for (free = flist->xbf_first; free; free = free->xbfi_next)
95 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
96 free->xbfi_blockcount);
97 logres = ntp->t_log_res;
98 logcount = ntp->t_log_count;
99 ntp = xfs_trans_dup(*tp);
100 error = xfs_trans_commit(*tp, 0);
101 *tp = ntp;
102 *committed = 1;
103 /*
104 * We have a new transaction, so we should return committed=1,
105 * even though we're returning an error.
106 */
107 if (error)
108 return error;
109
110 /*
111 * transaction commit worked ok so we can drop the extra ticket
112 * reference that we gained in xfs_trans_dup()
113 */
114 xfs_log_ticket_put(ntp->t_ticket);
115
116 if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
117 logcount)))
118 return error;
119 efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
120 for (free = flist->xbf_first; free != NULL; free = next) {
121 next = free->xbfi_next;
122 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
123 free->xbfi_blockcount))) {
124 /*
125 * The bmap free list will be cleaned up at a
126 * higher level. The EFI will be canceled when
127 * this transaction is aborted.
128 * Need to force shutdown here to make sure it
129 * happens, since this transaction may not be
130 * dirty yet.
131 */
132 mp = ntp->t_mountp;
133 if (!XFS_FORCED_SHUTDOWN(mp))
134 xfs_force_shutdown(mp,
135 (error == EFSCORRUPTED) ?
136 SHUTDOWN_CORRUPT_INCORE :
137 SHUTDOWN_META_IO_ERROR);
138 return error;
139 }
140 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
141 free->xbfi_blockcount);
142 xfs_bmap_del_free(flist, NULL, free);
143 }
144 return 0;
145}
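/*
 * Hedged sketch of the canonical xfs_bmap_finish() caller pattern, modelled
 * on xfs_alloc_file_space()/xfs_free_file_space() later in this file.  The
 * function below is illustrative only: the transaction setup and the
 * xfs_bmapi_write()/xfs_bunmapi() call that populates @free_list are elided,
 * and the identifier names are made up for the example.
 */
static int __maybe_unused
xfs_bmap_finish_example(
	struct xfs_trans	**tpp)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		firstfsb;
	int			committed;
	int			error;

	xfs_bmap_init(&free_list, &firstfsb);

	/* ... bmapi calls queue the extents to free on @free_list ... */

	/* roll the transaction and free everything queued on @free_list */
	error = xfs_bmap_finish(tpp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(*tpp,
				 XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
		return error;
	}
	return xfs_trans_commit(*tpp, XFS_TRANS_RELEASE_LOG_RES);
}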
146
147int
148xfs_bmap_rtalloc(
149 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
150{
151 xfs_alloctype_t atype = 0; /* type for allocation routines */
152 int error; /* error return value */
153 xfs_mount_t *mp; /* mount point structure */
154 xfs_extlen_t prod = 0; /* product factor for allocators */
155 xfs_extlen_t ralen = 0; /* realtime allocation length */
156 xfs_extlen_t align; /* minimum allocation alignment */
157 xfs_rtblock_t rtb;
158
159 mp = ap->ip->i_mount;
160 align = xfs_get_extsz_hint(ap->ip);
161 prod = align / mp->m_sb.sb_rextsize;
162 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
163 align, 1, ap->eof, 0,
164 ap->conv, &ap->offset, &ap->length);
165 if (error)
166 return error;
167 ASSERT(ap->length);
168 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
169
170 /*
171 * If the offset & length are not perfectly aligned
172 * then kill prod, it will just get us in trouble.
173 */
174 if (do_mod(ap->offset, align) || ap->length % align)
175 prod = 1;
176 /*
177 * Set ralen to be the actual requested length in rtextents.
178 */
179 ralen = ap->length / mp->m_sb.sb_rextsize;
180 /*
181 * If the old value was close enough to MAXEXTLEN that
182 * we rounded up to it, cut it back so it's valid again.
183 * Note that if it's a really large request (bigger than
184 * MAXEXTLEN), we don't hear about that number, and can't
185 * adjust the starting point to match it.
186 */
187 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
188 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
189
190 /*
191 * Lock out other modifications to the RT bitmap inode.
192 */
193 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
194 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
195
196 /*
197 * If it's an allocation to an empty file at offset 0,
198 * pick an extent that will space things out in the rt area.
199 */
200 if (ap->eof && ap->offset == 0) {
201 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
202
203 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
204 if (error)
205 return error;
206 ap->blkno = rtx * mp->m_sb.sb_rextsize;
207 } else {
208 ap->blkno = 0;
209 }
210
211 xfs_bmap_adjacent(ap);
212
213 /*
214 * Realtime allocation, done through xfs_rtallocate_extent.
215 */
216 atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
217 do_div(ap->blkno, mp->m_sb.sb_rextsize);
218 rtb = ap->blkno;
219 ap->length = ralen;
220 if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
221 &ralen, atype, ap->wasdel, prod, &rtb)))
222 return error;
223 if (rtb == NULLFSBLOCK && prod > 1 &&
224 (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
225 ap->length, &ralen, atype,
226 ap->wasdel, 1, &rtb)))
227 return error;
228 ap->blkno = rtb;
229 if (ap->blkno != NULLFSBLOCK) {
230 ap->blkno *= mp->m_sb.sb_rextsize;
231 ralen *= mp->m_sb.sb_rextsize;
232 ap->length = ralen;
233 ap->ip->i_d.di_nblocks += ralen;
234 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
235 if (ap->wasdel)
236 ap->ip->i_delayed_blks -= ralen;
237 /*
238 * Adjust the disk quota also. This was reserved
239 * earlier.
240 */
241 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
242 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
243 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
244 } else {
245 ap->length = 0;
246 }
247 return 0;
248}
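/*
 * Worked example of the realtime unit conversions above (illustrative
 * numbers only): with sb_rextsize = 16 fsblocks and an extent size hint of
 * align = 32 fsblocks, prod = 32 / 16 = 2, so the allocator is asked for
 * extents that are a multiple of two rtextents.  A 64-fsblock request gives
 * ralen = 64 / 16 = 4 rtextents, and on success the result is scaled back
 * up, e.g. rtextent 25 becomes ap->blkno = 25 * 16 = 400 fsblocks.
 */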
249
250/*
251 * Stack switching interfaces for allocation
252 */
253static void
254xfs_bmapi_allocate_worker(
255 struct work_struct *work)
256{
257 struct xfs_bmalloca *args = container_of(work,
258 struct xfs_bmalloca, work);
259 unsigned long pflags;
260
261 /* we are in a transaction context here */
262 current_set_flags_nested(&pflags, PF_FSTRANS);
263
264 args->result = __xfs_bmapi_allocate(args);
265 complete(args->done);
266
267 current_restore_flags_nested(&pflags, PF_FSTRANS);
268}
269
270/*
271 * Some allocation requests often come in with little stack to work on. Push
272 * them off to a worker thread so there is lots of stack to use. Otherwise just
273 * call directly to avoid the context switch overhead here.
274 */
275int
276xfs_bmapi_allocate(
277 struct xfs_bmalloca *args)
278{
279 DECLARE_COMPLETION_ONSTACK(done);
280
281 if (!args->stack_switch)
282 return __xfs_bmapi_allocate(args);
283
284
285 args->done = &done;
286 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
287 queue_work(xfs_alloc_wq, &args->work);
288 wait_for_completion(&done);
289 return args->result;
290}
291
292/*
293 * Check if the endoff is outside the last extent. If so the caller will grow
294 * the allocation to a stripe unit boundary. All offsets are considered outside
295 * the end of file for an empty fork, so 1 is returned in *eof in that case.
296 */
297int
298xfs_bmap_eof(
299 struct xfs_inode *ip,
300 xfs_fileoff_t endoff,
301 int whichfork,
302 int *eof)
303{
304 struct xfs_bmbt_irec rec;
305 int error;
306
307 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
308 if (error || *eof)
309 return error;
310
311 *eof = endoff >= rec.br_startoff + rec.br_blockcount;
312 return 0;
313}
314
315/*
316 * Extent tree block counting routines.
317 */
318
319/*
320 * Count leaf blocks given a range of extent records.
321 */
322STATIC void
323xfs_bmap_count_leaves(
324 xfs_ifork_t *ifp,
325 xfs_extnum_t idx,
326 int numrecs,
327 int *count)
328{
329 int b;
330
331 for (b = 0; b < numrecs; b++) {
332 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
333 *count += xfs_bmbt_get_blockcount(frp);
334 }
335}
336
337/*
338 * Count leaf blocks given a range of extent records originally
339 * in btree format.
340 */
341STATIC void
342xfs_bmap_disk_count_leaves(
343 struct xfs_mount *mp,
344 struct xfs_btree_block *block,
345 int numrecs,
346 int *count)
347{
348 int b;
349 xfs_bmbt_rec_t *frp;
350
351 for (b = 1; b <= numrecs; b++) {
352 frp = XFS_BMBT_REC_ADDR(mp, block, b);
353 *count += xfs_bmbt_disk_get_blockcount(frp);
354 }
355}
356
357/*
358 * Recursively walks each level of a btree
 359 * to count total fsblocks in use.
360 */
361STATIC int /* error */
362xfs_bmap_count_tree(
363 xfs_mount_t *mp, /* file system mount point */
364 xfs_trans_t *tp, /* transaction pointer */
365 xfs_ifork_t *ifp, /* inode fork pointer */
366 xfs_fsblock_t blockno, /* file system block number */
367 int levelin, /* level in btree */
368 int *count) /* Count of blocks */
369{
370 int error;
371 xfs_buf_t *bp, *nbp;
372 int level = levelin;
373 __be64 *pp;
374 xfs_fsblock_t bno = blockno;
375 xfs_fsblock_t nextbno;
376 struct xfs_btree_block *block, *nextblock;
377 int numrecs;
378
379 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
380 &xfs_bmbt_buf_ops);
381 if (error)
382 return error;
383 *count += 1;
384 block = XFS_BUF_TO_BLOCK(bp);
385
386 if (--level) {
387 /* Not at node above leaves, count this level of nodes */
388 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
389 while (nextbno != NULLFSBLOCK) {
390 error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
391 XFS_BMAP_BTREE_REF,
392 &xfs_bmbt_buf_ops);
393 if (error)
394 return error;
395 *count += 1;
396 nextblock = XFS_BUF_TO_BLOCK(nbp);
397 nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
398 xfs_trans_brelse(tp, nbp);
399 }
400
401 /* Dive to the next level */
402 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
403 bno = be64_to_cpu(*pp);
404 if (unlikely((error =
405 xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
406 xfs_trans_brelse(tp, bp);
407 XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
408 XFS_ERRLEVEL_LOW, mp);
409 return XFS_ERROR(EFSCORRUPTED);
410 }
411 xfs_trans_brelse(tp, bp);
412 } else {
413 /* count all level 1 nodes and their leaves */
414 for (;;) {
415 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
416 numrecs = be16_to_cpu(block->bb_numrecs);
417 xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
418 xfs_trans_brelse(tp, bp);
419 if (nextbno == NULLFSBLOCK)
420 break;
421 bno = nextbno;
422 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
423 XFS_BMAP_BTREE_REF,
424 &xfs_bmbt_buf_ops);
425 if (error)
426 return error;
427 *count += 1;
428 block = XFS_BUF_TO_BLOCK(bp);
429 }
430 }
431 return 0;
432}
433
434/*
435 * Count fsblocks of the given fork.
436 */
437int /* error */
438xfs_bmap_count_blocks(
439 xfs_trans_t *tp, /* transaction pointer */
440 xfs_inode_t *ip, /* incore inode */
441 int whichfork, /* data or attr fork */
442 int *count) /* out: count of blocks */
443{
444 struct xfs_btree_block *block; /* current btree block */
445 xfs_fsblock_t bno; /* block # of "block" */
446 xfs_ifork_t *ifp; /* fork structure */
447 int level; /* btree level, for checking */
448 xfs_mount_t *mp; /* file system mount structure */
449 __be64 *pp; /* pointer to block address */
450
451 bno = NULLFSBLOCK;
452 mp = ip->i_mount;
453 ifp = XFS_IFORK_PTR(ip, whichfork);
454 if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
455 xfs_bmap_count_leaves(ifp, 0,
456 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
457 count);
458 return 0;
459 }
460
461 /*
462 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
463 */
464 block = ifp->if_broot;
465 level = be16_to_cpu(block->bb_level);
466 ASSERT(level > 0);
467 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
468 bno = be64_to_cpu(*pp);
469 ASSERT(bno != NULLDFSBNO);
470 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
471 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
472
473 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
474 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
475 mp);
476 return XFS_ERROR(EFSCORRUPTED);
477 }
478
479 return 0;
480}
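/*
 * Hedged example of a typical xfs_bmap_count_blocks() call: counting the
 * blocks backing the attribute fork, as xfs_swap_extents() does below when
 * apportioning di_nblocks between the two inodes.  The helper name is made
 * up for illustration.
 */
static int __maybe_unused
xfs_count_attr_blocks_example(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			*aforkblks)
{
	*aforkblks = 0;
	if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
	    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
		return xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, aforkblks);
	return 0;
}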
481
482/*
483 * returns 1 for success, 0 if we failed to map the extent.
484 */
485STATIC int
486xfs_getbmapx_fix_eof_hole(
487 xfs_inode_t *ip, /* xfs incore inode pointer */
488 struct getbmapx *out, /* output structure */
489 int prealloced, /* this is a file with
490 * preallocated data space */
491 __int64_t end, /* last block requested */
492 xfs_fsblock_t startblock)
493{
494 __int64_t fixlen;
495 xfs_mount_t *mp; /* file system mount point */
496 xfs_ifork_t *ifp; /* inode fork pointer */
497 xfs_extnum_t lastx; /* last extent pointer */
498 xfs_fileoff_t fileblock;
499
500 if (startblock == HOLESTARTBLOCK) {
501 mp = ip->i_mount;
502 out->bmv_block = -1;
503 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
504 fixlen -= out->bmv_offset;
505 if (prealloced && out->bmv_offset + out->bmv_length == end) {
506 /* Came to hole at EOF. Trim it. */
507 if (fixlen <= 0)
508 return 0;
509 out->bmv_length = fixlen;
510 }
511 } else {
512 if (startblock == DELAYSTARTBLOCK)
513 out->bmv_block = -2;
514 else
515 out->bmv_block = xfs_fsb_to_db(ip, startblock);
516 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
517 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
518 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
519 (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
520 out->bmv_oflags |= BMV_OF_LAST;
521 }
522
523 return 1;
524}
525
526/*
527 * Get inode's extents as described in bmv, and format for output.
528 * Calls formatter to fill the user's buffer until all extents
529 * are mapped, until the passed-in bmv->bmv_count slots have
530 * been filled, or until the formatter short-circuits the loop,
531 * if it is tracking filled-in extents on its own.
532 */
533int /* error code */
534xfs_getbmap(
535 xfs_inode_t *ip,
536 struct getbmapx *bmv, /* user bmap structure */
537 xfs_bmap_format_t formatter, /* format to user */
538 void *arg) /* formatter arg */
539{
540 __int64_t bmvend; /* last block requested */
541 int error = 0; /* return value */
542 __int64_t fixlen; /* length for -1 case */
543 int i; /* extent number */
544 int lock; /* lock state */
545 xfs_bmbt_irec_t *map; /* buffer for user's data */
546 xfs_mount_t *mp; /* file system mount point */
547 int nex; /* # of user extents can do */
548 int nexleft; /* # of user extents left */
549 int subnex; /* # of bmapi's can do */
550 int nmap; /* number of map entries */
551 struct getbmapx *out; /* output structure */
552 int whichfork; /* data or attr fork */
553 int prealloced; /* this is a file with
554 * preallocated data space */
555 int iflags; /* interface flags */
556 int bmapi_flags; /* flags for xfs_bmapi */
557 int cur_ext = 0;
558
559 mp = ip->i_mount;
560 iflags = bmv->bmv_iflags;
561 whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
562
563 if (whichfork == XFS_ATTR_FORK) {
564 if (XFS_IFORK_Q(ip)) {
565 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
566 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
567 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
568 return XFS_ERROR(EINVAL);
569 } else if (unlikely(
570 ip->i_d.di_aformat != 0 &&
571 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
572 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
573 ip->i_mount);
574 return XFS_ERROR(EFSCORRUPTED);
575 }
576
577 prealloced = 0;
578 fixlen = 1LL << 32;
579 } else {
580 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
581 ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
582 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
583 return XFS_ERROR(EINVAL);
584
585 if (xfs_get_extsz_hint(ip) ||
586 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
587 prealloced = 1;
588 fixlen = mp->m_super->s_maxbytes;
589 } else {
590 prealloced = 0;
591 fixlen = XFS_ISIZE(ip);
592 }
593 }
594
595 if (bmv->bmv_length == -1) {
596 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
597 bmv->bmv_length =
598 max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
599 } else if (bmv->bmv_length == 0) {
600 bmv->bmv_entries = 0;
601 return 0;
602 } else if (bmv->bmv_length < 0) {
603 return XFS_ERROR(EINVAL);
604 }
605
606 nex = bmv->bmv_count - 1;
607 if (nex <= 0)
608 return XFS_ERROR(EINVAL);
609 bmvend = bmv->bmv_offset + bmv->bmv_length;
610
611
612 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
613 return XFS_ERROR(ENOMEM);
614 out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
615 if (!out) {
616 out = kmem_zalloc_large(bmv->bmv_count *
617 sizeof(struct getbmapx));
618 if (!out)
619 return XFS_ERROR(ENOMEM);
620 }
621
622 xfs_ilock(ip, XFS_IOLOCK_SHARED);
623 if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
624 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
625 error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
626 if (error)
627 goto out_unlock_iolock;
628 }
629 /*
630 * even after flushing the inode, there can still be delalloc
631 * blocks on the inode beyond EOF due to speculative
632 * preallocation. These are not removed until the release
633 * function is called or the inode is inactivated. Hence we
634 * cannot assert here that ip->i_delayed_blks == 0.
635 */
636 }
637
638 lock = xfs_ilock_map_shared(ip);
639
640 /*
641 * Don't let nex be bigger than the number of extents
642 * we can have assuming alternating holes and real extents.
643 */
644 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
645 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
646
647 bmapi_flags = xfs_bmapi_aflag(whichfork);
648 if (!(iflags & BMV_IF_PREALLOC))
649 bmapi_flags |= XFS_BMAPI_IGSTATE;
650
651 /*
652 * Allocate enough space to handle "subnex" maps at a time.
653 */
654 error = ENOMEM;
655 subnex = 16;
656 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
657 if (!map)
658 goto out_unlock_ilock;
659
660 bmv->bmv_entries = 0;
661
662 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
663 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
664 error = 0;
665 goto out_free_map;
666 }
667
668 nexleft = nex;
669
670 do {
671 nmap = (nexleft > subnex) ? subnex : nexleft;
672 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
673 XFS_BB_TO_FSB(mp, bmv->bmv_length),
674 map, &nmap, bmapi_flags);
675 if (error)
676 goto out_free_map;
677 ASSERT(nmap <= subnex);
678
679 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
680 out[cur_ext].bmv_oflags = 0;
681 if (map[i].br_state == XFS_EXT_UNWRITTEN)
682 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
683 else if (map[i].br_startblock == DELAYSTARTBLOCK)
684 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
685 out[cur_ext].bmv_offset =
686 XFS_FSB_TO_BB(mp, map[i].br_startoff);
687 out[cur_ext].bmv_length =
688 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
689 out[cur_ext].bmv_unused1 = 0;
690 out[cur_ext].bmv_unused2 = 0;
691
692 /*
693 * delayed allocation extents that start beyond EOF can
694 * occur due to speculative EOF allocation when the
695 * delalloc extent is larger than the largest freespace
696 * extent at conversion time. These extents cannot be
697 * converted by data writeback, so can exist here even
698 * if we are not supposed to be finding delalloc
699 * extents.
700 */
701 if (map[i].br_startblock == DELAYSTARTBLOCK &&
702 map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
703 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
704
705 if (map[i].br_startblock == HOLESTARTBLOCK &&
706 whichfork == XFS_ATTR_FORK) {
707 /* came to the end of attribute fork */
708 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
709 goto out_free_map;
710 }
711
712 if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
713 prealloced, bmvend,
714 map[i].br_startblock))
715 goto out_free_map;
716
717 bmv->bmv_offset =
718 out[cur_ext].bmv_offset +
719 out[cur_ext].bmv_length;
720 bmv->bmv_length =
721 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
722
723 /*
724 * In case we don't want to return the hole,
725 * don't increase cur_ext so that we can reuse
726 * it in the next loop.
727 */
728 if ((iflags & BMV_IF_NO_HOLES) &&
729 map[i].br_startblock == HOLESTARTBLOCK) {
730 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
731 continue;
732 }
733
734 nexleft--;
735 bmv->bmv_entries++;
736 cur_ext++;
737 }
738 } while (nmap && nexleft && bmv->bmv_length);
739
740 out_free_map:
741 kmem_free(map);
742 out_unlock_ilock:
743 xfs_iunlock_map_shared(ip, lock);
744 out_unlock_iolock:
745 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
746
747 for (i = 0; i < cur_ext; i++) {
748 int full = 0; /* user array is full */
749
750 /* format results & advance arg */
751 error = formatter(&arg, &out[i], &full);
752 if (error || full)
753 break;
754 }
755
756 if (is_vmalloc_addr(out))
757 kmem_free_large(out);
758 else
759 kmem_free(out);
760 return error;
761}
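/*
 * Minimal sketch of a formatter callback for xfs_getbmap().  The signature
 * is inferred from the call site above (formatter(&arg, &out[i], &full));
 * the real formatters copy each record out to user memory, whereas this
 * hypothetical one fills a kernel-side array described by the cursor passed
 * through @ap and sets *full once the array is exhausted.
 */
struct xfs_getbmap_cursor_example {
	struct getbmapx		*entries;	/* destination array */
	int			nentries;	/* slots available */
	int			used;		/* slots consumed so far */
};

static int __maybe_unused
xfs_getbmap_format_example(
	void			**ap,
	struct getbmapx		*bmv,
	int			*full)
{
	struct xfs_getbmap_cursor_example *cur = *ap;

	cur->entries[cur->used++] = *bmv;
	if (cur->used >= cur->nentries)
		*full = 1;	/* tells xfs_getbmap() to stop formatting */
	return 0;
}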
762
763/*
 764 * dead simple method of punching delayed allocation blocks from a range in
765 * the inode. Walks a block at a time so will be slow, but is only executed in
 766 * rare error cases so the overhead is not critical. This will always punch out
767 * both the start and end blocks, even if the ranges only partially overlap
768 * them, so it is up to the caller to ensure that partial blocks are not
769 * passed in.
770 */
771int
772xfs_bmap_punch_delalloc_range(
773 struct xfs_inode *ip,
774 xfs_fileoff_t start_fsb,
775 xfs_fileoff_t length)
776{
777 xfs_fileoff_t remaining = length;
778 int error = 0;
779
780 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
781
782 do {
783 int done;
784 xfs_bmbt_irec_t imap;
785 int nimaps = 1;
786 xfs_fsblock_t firstblock;
787 xfs_bmap_free_t flist;
788
789 /*
790 * Map the range first and check that it is a delalloc extent
791 * before trying to unmap the range. Otherwise we will be
792 * trying to remove a real extent (which requires a
793 * transaction) or a hole, which is probably a bad idea...
794 */
795 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
796 XFS_BMAPI_ENTIRE);
797
798 if (error) {
799 /* something screwed, just bail */
800 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
801 xfs_alert(ip->i_mount,
802 "Failed delalloc mapping lookup ino %lld fsb %lld.",
803 ip->i_ino, start_fsb);
804 }
805 break;
806 }
807 if (!nimaps) {
808 /* nothing there */
809 goto next_block;
810 }
811 if (imap.br_startblock != DELAYSTARTBLOCK) {
812 /* been converted, ignore */
813 goto next_block;
814 }
815 WARN_ON(imap.br_blockcount == 0);
816
817 /*
818 * Note: while we initialise the firstblock/flist pair, they
819 * should never be used because blocks should never be
 820 * allocated or freed for a delalloc extent and hence we don't
 821 * need to cancel or finish them after the xfs_bunmapi() call.
822 */
823 xfs_bmap_init(&flist, &firstblock);
824 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
825 &flist, &done);
826 if (error)
827 break;
828
829 ASSERT(!flist.xbf_count && !flist.xbf_first);
830next_block:
831 start_fsb++;
832 remaining--;
833 } while(remaining > 0);
834
835 return error;
836}
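/*
 * Hedged sketch of how xfs_bmap_punch_delalloc_range() is meant to be
 * called: the caller must already hold the ilock exclusively (asserted
 * above) and passes a whole-fsblock range.  The wrapper name is made up;
 * the real callers live in the buffered writeback error paths.
 */
static void __maybe_unused
xfs_punch_delalloc_example(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		end_fsb)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					      end_fsb - start_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		xfs_alert(ip->i_mount,
			"punching delalloc range failed, error %d", error);
}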
 837
838/*
839 * Test whether it is appropriate to check an inode for and free post EOF
840 * blocks. The 'force' parameter determines whether we should also consider
841 * regular files that are marked preallocated or append-only.
842 */
843bool
844xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
845{
846 /* prealloc/delalloc exists only on regular files */
847 if (!S_ISREG(ip->i_d.di_mode))
848 return false;
849
850 /*
851 * Zero sized files with no cached pages and delalloc blocks will not
852 * have speculative prealloc/delalloc blocks to remove.
853 */
854 if (VFS_I(ip)->i_size == 0 &&
855 VN_CACHED(VFS_I(ip)) == 0 &&
856 ip->i_delayed_blks == 0)
857 return false;
858
859 /* If we haven't read in the extent list, then don't do it now. */
860 if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
861 return false;
862
863 /*
864 * Do not free real preallocated or append-only files unless the file
865 * has delalloc blocks and we are forced to remove them.
866 */
867 if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
868 if (!force || ip->i_delayed_blks == 0)
869 return false;
870
871 return true;
872}
873
874/*
875 * This is called by xfs_inactive to free any blocks beyond eof
876 * when the link count isn't zero and by xfs_dm_punch_hole() when
877 * punching a hole to EOF.
878 */
879int
880xfs_free_eofblocks(
881 xfs_mount_t *mp,
882 xfs_inode_t *ip,
883 bool need_iolock)
884{
885 xfs_trans_t *tp;
886 int error;
887 xfs_fileoff_t end_fsb;
888 xfs_fileoff_t last_fsb;
889 xfs_filblks_t map_len;
890 int nimaps;
891 xfs_bmbt_irec_t imap;
892
893 /*
894 * Figure out if there are any blocks beyond the end
895 * of the file. If not, then there is nothing to do.
896 */
897 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
898 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
899 if (last_fsb <= end_fsb)
900 return 0;
901 map_len = last_fsb - end_fsb;
902
903 nimaps = 1;
904 xfs_ilock(ip, XFS_ILOCK_SHARED);
905 error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
906 xfs_iunlock(ip, XFS_ILOCK_SHARED);
907
908 if (!error && (nimaps != 0) &&
909 (imap.br_startblock != HOLESTARTBLOCK ||
910 ip->i_delayed_blks)) {
911 /*
912 * Attach the dquots to the inode up front.
913 */
914 error = xfs_qm_dqattach(ip, 0);
915 if (error)
916 return error;
917
918 /*
919 * There are blocks after the end of file.
920 * Free them up now by truncating the file to
921 * its current size.
922 */
923 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
924
925 if (need_iolock) {
926 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
927 xfs_trans_cancel(tp, 0);
928 return EAGAIN;
929 }
930 }
931
932 error = xfs_trans_reserve(tp, 0,
933 XFS_ITRUNCATE_LOG_RES(mp),
934 0, XFS_TRANS_PERM_LOG_RES,
935 XFS_ITRUNCATE_LOG_COUNT);
936 if (error) {
937 ASSERT(XFS_FORCED_SHUTDOWN(mp));
938 xfs_trans_cancel(tp, 0);
939 if (need_iolock)
940 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
941 return error;
942 }
943
944 xfs_ilock(ip, XFS_ILOCK_EXCL);
945 xfs_trans_ijoin(tp, ip, 0);
946
947 /*
948 * Do not update the on-disk file size. If we update the
949 * on-disk file size and then the system crashes before the
950 * contents of the file are flushed to disk then the files
951 * may be full of holes (ie NULL files bug).
952 */
953 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
954 XFS_ISIZE(ip));
955 if (error) {
956 /*
957 * If we get an error at this point we simply don't
958 * bother truncating the file.
959 */
960 xfs_trans_cancel(tp,
961 (XFS_TRANS_RELEASE_LOG_RES |
962 XFS_TRANS_ABORT));
963 } else {
964 error = xfs_trans_commit(tp,
965 XFS_TRANS_RELEASE_LOG_RES);
966 if (!error)
967 xfs_inode_clear_eofblocks_tag(ip);
968 }
969
970 xfs_iunlock(ip, XFS_ILOCK_EXCL);
971 if (need_iolock)
972 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
973 }
974 return error;
975}
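/*
 * The two routines above are meant to be used together.  A hedged sketch of
 * the usual pattern (the wrapper name is invented; compare the inactivation
 * and background EOF-blocks scan callers elsewhere in XFS): do the cheap
 * check first, and only take the transaction-heavy truncate path when there
 * really is post-EOF preallocation to trim.
 */
static int __maybe_unused
xfs_trim_eofblocks_example(
	struct xfs_inode	*ip,
	bool			force)
{
	if (!xfs_can_free_eofblocks(ip, force))
		return 0;
	/* need_iolock == true: xfs_free_eofblocks() takes the iolock itself */
	return xfs_free_eofblocks(ip->i_mount, ip, true);
}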
976
977/*
978 * xfs_alloc_file_space()
979 * This routine allocates disk space for the given file.
980 *
981 * If alloc_type == 0, this request is for an ALLOCSP type
982 * request which will change the file size. In this case, no
983 * DMAPI event will be generated by the call. A TRUNCATE event
984 * will be generated later by xfs_setattr.
985 *
986 * If alloc_type != 0, this request is for a RESVSP type
987 * request, and a DMAPI DM_EVENT_WRITE will be generated if the
988 * lower block boundary byte address is less than the file's
989 * length.
990 *
991 * RETURNS:
992 * 0 on success
993 * errno on error
994 *
995 */
996STATIC int
997xfs_alloc_file_space(
998 xfs_inode_t *ip,
999 xfs_off_t offset,
1000 xfs_off_t len,
1001 int alloc_type,
1002 int attr_flags)
1003{
1004 xfs_mount_t *mp = ip->i_mount;
1005 xfs_off_t count;
1006 xfs_filblks_t allocated_fsb;
1007 xfs_filblks_t allocatesize_fsb;
1008 xfs_extlen_t extsz, temp;
1009 xfs_fileoff_t startoffset_fsb;
1010 xfs_fsblock_t firstfsb;
1011 int nimaps;
1012 int quota_flag;
1013 int rt;
1014 xfs_trans_t *tp;
1015 xfs_bmbt_irec_t imaps[1], *imapp;
1016 xfs_bmap_free_t free_list;
1017 uint qblocks, resblks, resrtextents;
1018 int committed;
1019 int error;
1020
1021 trace_xfs_alloc_file_space(ip);
1022
1023 if (XFS_FORCED_SHUTDOWN(mp))
1024 return XFS_ERROR(EIO);
1025
1026 error = xfs_qm_dqattach(ip, 0);
1027 if (error)
1028 return error;
1029
1030 if (len <= 0)
1031 return XFS_ERROR(EINVAL);
1032
1033 rt = XFS_IS_REALTIME_INODE(ip);
1034 extsz = xfs_get_extsz_hint(ip);
1035
1036 count = len;
1037 imapp = &imaps[0];
1038 nimaps = 1;
1039 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
1040 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1041
1042 /*
1043 * Allocate file space until done or until there is an error
1044 */
1045 while (allocatesize_fsb && !error) {
1046 xfs_fileoff_t s, e;
1047
1048 /*
1049 * Determine space reservations for data/realtime.
1050 */
1051 if (unlikely(extsz)) {
1052 s = startoffset_fsb;
1053 do_div(s, extsz);
1054 s *= extsz;
1055 e = startoffset_fsb + allocatesize_fsb;
1056 if ((temp = do_mod(startoffset_fsb, extsz)))
1057 e += temp;
1058 if ((temp = do_mod(e, extsz)))
1059 e += extsz - temp;
1060 } else {
1061 s = 0;
1062 e = allocatesize_fsb;
1063 }
1064
1065 /*
1066 * The transaction reservation is limited to a 32-bit block
1067 * count, hence we need to limit the number of blocks we are
1068 * trying to reserve to avoid an overflow. We can't allocate
1069 * more than @nimaps extents, and an extent is limited on disk
1070 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1071 */
1072 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1073 if (unlikely(rt)) {
1074 resrtextents = qblocks = resblks;
1075 resrtextents /= mp->m_sb.sb_rextsize;
1076 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1077 quota_flag = XFS_QMOPT_RES_RTBLKS;
1078 } else {
1079 resrtextents = 0;
1080 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1081 quota_flag = XFS_QMOPT_RES_REGBLKS;
1082 }
1083
1084 /*
1085 * Allocate and setup the transaction.
1086 */
1087 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1088 error = xfs_trans_reserve(tp, resblks,
1089 XFS_WRITE_LOG_RES(mp), resrtextents,
1090 XFS_TRANS_PERM_LOG_RES,
1091 XFS_WRITE_LOG_COUNT);
1092 /*
1093 * Check for running out of space
1094 */
1095 if (error) {
1096 /*
1097 * Free the transaction structure.
1098 */
1099 ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1100 xfs_trans_cancel(tp, 0);
1101 break;
1102 }
1103 xfs_ilock(ip, XFS_ILOCK_EXCL);
1104 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1105 0, quota_flag);
1106 if (error)
1107 goto error1;
1108
1109 xfs_trans_ijoin(tp, ip, 0);
1110
1111 xfs_bmap_init(&free_list, &firstfsb);
1112 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1113 allocatesize_fsb, alloc_type, &firstfsb,
1114 0, imapp, &nimaps, &free_list);
1115 if (error) {
1116 goto error0;
1117 }
1118
1119 /*
1120 * Complete the transaction
1121 */
1122 error = xfs_bmap_finish(&tp, &free_list, &committed);
1123 if (error) {
1124 goto error0;
1125 }
1126
1127 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1128 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1129 if (error) {
1130 break;
1131 }
1132
1133 allocated_fsb = imapp->br_blockcount;
1134
1135 if (nimaps == 0) {
1136 error = XFS_ERROR(ENOSPC);
1137 break;
1138 }
1139
1140 startoffset_fsb += allocated_fsb;
1141 allocatesize_fsb -= allocated_fsb;
1142 }
1143
1144 return error;
1145
1146error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1147 xfs_bmap_cancel(&free_list);
1148 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1149
1150error1: /* Just cancel transaction */
1151 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1152 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1153 return error;
1154}
1155
1156/*
1157 * Zero file bytes between startoff and endoff inclusive.
1158 * The iolock is held exclusive and no blocks are buffered.
1159 *
1160 * This function is used by xfs_free_file_space() to zero
1161 * partial blocks when the range to free is not block aligned.
1162 * When unreserving space with boundaries that are not block
1163 * aligned we round up the start and round down the end
1164 * boundaries and then use this function to zero the parts of
1165 * the blocks that got dropped during the rounding.
1166 */
1167STATIC int
1168xfs_zero_remaining_bytes(
1169 xfs_inode_t *ip,
1170 xfs_off_t startoff,
1171 xfs_off_t endoff)
1172{
1173 xfs_bmbt_irec_t imap;
1174 xfs_fileoff_t offset_fsb;
1175 xfs_off_t lastoffset;
1176 xfs_off_t offset;
1177 xfs_buf_t *bp;
1178 xfs_mount_t *mp = ip->i_mount;
1179 int nimap;
1180 int error = 0;
1181
1182 /*
1183 * Avoid doing I/O beyond eof - it's not necessary
1184 * since nothing can read beyond eof. The space will
1185 * be zeroed when the file is extended anyway.
1186 */
1187 if (startoff >= XFS_ISIZE(ip))
1188 return 0;
1189
1190 if (endoff > XFS_ISIZE(ip))
1191 endoff = XFS_ISIZE(ip);
1192
1193 bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
1194 mp->m_rtdev_targp : mp->m_ddev_targp,
1195 BTOBB(mp->m_sb.sb_blocksize), 0);
1196 if (!bp)
1197 return XFS_ERROR(ENOMEM);
1198
1199 xfs_buf_unlock(bp);
1200
1201 for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1202 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1203 nimap = 1;
1204 error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1205 if (error || nimap < 1)
1206 break;
1207 ASSERT(imap.br_blockcount >= 1);
1208 ASSERT(imap.br_startoff == offset_fsb);
1209 lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1210 if (lastoffset > endoff)
1211 lastoffset = endoff;
1212 if (imap.br_startblock == HOLESTARTBLOCK)
1213 continue;
1214 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1215 if (imap.br_state == XFS_EXT_UNWRITTEN)
1216 continue;
1217 XFS_BUF_UNDONE(bp);
1218 XFS_BUF_UNWRITE(bp);
1219 XFS_BUF_READ(bp);
1220 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1221 xfsbdstrat(mp, bp);
1222 error = xfs_buf_iowait(bp);
1223 if (error) {
1224 xfs_buf_ioerror_alert(bp,
1225 "xfs_zero_remaining_bytes(read)");
1226 break;
1227 }
1228 memset(bp->b_addr +
1229 (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1230 0, lastoffset - offset + 1);
1231 XFS_BUF_UNDONE(bp);
1232 XFS_BUF_UNREAD(bp);
1233 XFS_BUF_WRITE(bp);
1234 xfsbdstrat(mp, bp);
1235 error = xfs_buf_iowait(bp);
1236 if (error) {
1237 xfs_buf_ioerror_alert(bp,
1238 "xfs_zero_remaining_bytes(write)");
1239 break;
1240 }
1241 }
1242 xfs_buf_free(bp);
1243 return error;
1244}
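/*
 * Worked example of the edge zeroing this helper exists for (illustrative
 * numbers, 4096-byte blocks assumed): xfs_free_file_space() below, asked to
 * free 9000 bytes at offset 1000, rounds the start up to fsblock 1 and the
 * end (byte 10000) down to fsblock 2, so xfs_bunmapi() punches out only
 * fsblock 1 (bytes 4096-8191).  The partial tails, bytes 1000-4095 and
 * 8192-9999, are zeroed here instead so stale data is not left exposed.
 */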
1245
1246/*
1247 * xfs_free_file_space()
1248 * This routine frees disk space for the given file.
1249 *
1250 * This routine is only called by xfs_change_file_space
1251 * for an UNRESVSP type call.
1252 *
1253 * RETURNS:
1254 * 0 on success
1255 * errno on error
1256 *
1257 */
1258STATIC int
1259xfs_free_file_space(
1260 xfs_inode_t *ip,
1261 xfs_off_t offset,
1262 xfs_off_t len,
1263 int attr_flags)
1264{
1265 int committed;
1266 int done;
1267 xfs_fileoff_t endoffset_fsb;
1268 int error;
1269 xfs_fsblock_t firstfsb;
1270 xfs_bmap_free_t free_list;
1271 xfs_bmbt_irec_t imap;
1272 xfs_off_t ioffset;
1273 xfs_extlen_t mod=0;
1274 xfs_mount_t *mp;
1275 int nimap;
1276 uint resblks;
1277 xfs_off_t rounding;
1278 int rt;
1279 xfs_fileoff_t startoffset_fsb;
1280 xfs_trans_t *tp;
1281 int need_iolock = 1;
1282
1283 mp = ip->i_mount;
1284
1285 trace_xfs_free_file_space(ip);
1286
1287 error = xfs_qm_dqattach(ip, 0);
1288 if (error)
1289 return error;
1290
1291 error = 0;
1292 if (len <= 0) /* if nothing being freed */
1293 return error;
1294 rt = XFS_IS_REALTIME_INODE(ip);
1295 startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1296 endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1297
1298 if (attr_flags & XFS_ATTR_NOLOCK)
1299 need_iolock = 0;
1300 if (need_iolock) {
1301 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1302 /* wait for the completion of any pending DIOs */
1303 inode_dio_wait(VFS_I(ip));
1304 }
1305
1306 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1307 ioffset = offset & ~(rounding - 1);
1308 error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1309 ioffset, -1);
1310 if (error)
1311 goto out_unlock_iolock;
1312 truncate_pagecache_range(VFS_I(ip), ioffset, -1);
1313
1314 /*
1315 * Need to zero the stuff we're not freeing, on disk.
1316 * If it's a realtime file & can't use unwritten extents then we
1317 * actually need to zero the extent edges. Otherwise xfs_bunmapi
1318 * will take care of it for us.
1319 */
1320 if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1321 nimap = 1;
1322 error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1323 &imap, &nimap, 0);
1324 if (error)
1325 goto out_unlock_iolock;
1326 ASSERT(nimap == 0 || nimap == 1);
1327 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1328 xfs_daddr_t block;
1329
1330 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1331 block = imap.br_startblock;
1332 mod = do_div(block, mp->m_sb.sb_rextsize);
1333 if (mod)
1334 startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1335 }
1336 nimap = 1;
1337 error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1338 &imap, &nimap, 0);
1339 if (error)
1340 goto out_unlock_iolock;
1341 ASSERT(nimap == 0 || nimap == 1);
1342 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1343 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1344 mod++;
1345 if (mod && (mod != mp->m_sb.sb_rextsize))
1346 endoffset_fsb -= mod;
1347 }
1348 }
1349 if ((done = (endoffset_fsb <= startoffset_fsb)))
1350 /*
1351 * One contiguous piece to clear
1352 */
1353 error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1354 else {
1355 /*
1356 * Some full blocks, possibly two pieces to clear
1357 */
1358 if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1359 error = xfs_zero_remaining_bytes(ip, offset,
1360 XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1361 if (!error &&
1362 XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1363 error = xfs_zero_remaining_bytes(ip,
1364 XFS_FSB_TO_B(mp, endoffset_fsb),
1365 offset + len - 1);
1366 }
1367
1368 /*
1369 * free file space until done or until there is an error
1370 */
1371 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1372 while (!error && !done) {
1373
1374 /*
1375 * allocate and setup the transaction. Allow this
1376 * transaction to dip into the reserve blocks to ensure
1377 * the freeing of the space succeeds at ENOSPC.
1378 */
1379 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1380 tp->t_flags |= XFS_TRANS_RESERVE;
1381 error = xfs_trans_reserve(tp,
1382 resblks,
1383 XFS_WRITE_LOG_RES(mp),
1384 0,
1385 XFS_TRANS_PERM_LOG_RES,
1386 XFS_WRITE_LOG_COUNT);
1387
1388 /*
1389 * check for running out of space
1390 */
1391 if (error) {
1392 /*
1393 * Free the transaction structure.
1394 */
1395 ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1396 xfs_trans_cancel(tp, 0);
1397 break;
1398 }
1399 xfs_ilock(ip, XFS_ILOCK_EXCL);
1400 error = xfs_trans_reserve_quota(tp, mp,
1401 ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1402 resblks, 0, XFS_QMOPT_RES_REGBLKS);
1403 if (error)
1404 goto error1;
1405
1406 xfs_trans_ijoin(tp, ip, 0);
1407
1408 /*
1409 * issue the bunmapi() call to free the blocks
1410 */
1411 xfs_bmap_init(&free_list, &firstfsb);
1412 error = xfs_bunmapi(tp, ip, startoffset_fsb,
1413 endoffset_fsb - startoffset_fsb,
1414 0, 2, &firstfsb, &free_list, &done);
1415 if (error) {
1416 goto error0;
1417 }
1418
1419 /*
1420 * complete the transaction
1421 */
1422 error = xfs_bmap_finish(&tp, &free_list, &committed);
1423 if (error) {
1424 goto error0;
1425 }
1426
1427 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1428 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1429 }
1430
1431 out_unlock_iolock:
1432 if (need_iolock)
1433 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1434 return error;
1435
1436 error0:
1437 xfs_bmap_cancel(&free_list);
1438 error1:
1439 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1440 xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
1441 XFS_ILOCK_EXCL);
1442 return error;
1443}
1444
1445
1446STATIC int
1447xfs_zero_file_space(
1448 struct xfs_inode *ip,
1449 xfs_off_t offset,
1450 xfs_off_t len,
1451 int attr_flags)
1452{
1453 struct xfs_mount *mp = ip->i_mount;
1454 uint granularity;
1455 xfs_off_t start_boundary;
1456 xfs_off_t end_boundary;
1457 int error;
1458
1459 granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1460
1461 /*
1462 * Round the range of extents we are going to convert inwards. If the
1463 * offset is aligned, then it doesn't get changed so we zero from the
1464 * start of the block offset points to.
1465 */
1466 start_boundary = round_up(offset, granularity);
1467 end_boundary = round_down(offset + len, granularity);
1468
1469 ASSERT(start_boundary >= offset);
1470 ASSERT(end_boundary <= offset + len);
1471
1472 if (!(attr_flags & XFS_ATTR_NOLOCK))
1473 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1474
1475 if (start_boundary < end_boundary - 1) {
1476 /* punch out the page cache over the conversion range */
1477 truncate_pagecache_range(VFS_I(ip), start_boundary,
1478 end_boundary - 1);
1479 /* convert the blocks */
1480 error = xfs_alloc_file_space(ip, start_boundary,
1481 end_boundary - start_boundary - 1,
1482 XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
1483 attr_flags);
1484 if (error)
1485 goto out_unlock;
1486
1487 /* We've handled the interior of the range, now for the edges */
1488 if (start_boundary != offset)
1489 error = xfs_iozero(ip, offset, start_boundary - offset);
1490 if (error)
1491 goto out_unlock;
1492
1493 if (end_boundary != offset + len)
1494 error = xfs_iozero(ip, end_boundary,
1495 offset + len - end_boundary);
1496
1497 } else {
1498 /*
1499 * It's either a sub-granularity range or the range spanned lies
1500 * partially across two adjacent blocks.
1501 */
1502 error = xfs_iozero(ip, offset, len);
1503 }
1504
1505out_unlock:
1506 if (!(attr_flags & XFS_ATTR_NOLOCK))
1507 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1508 return error;
1509
1510}
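/*
 * Worked example of the boundary rounding above (illustrative numbers,
 * 4k blocks and 4k pages, so granularity = 4096): zeroing 10000 bytes at
 * offset 1000 gives start_boundary = 4096 and end_boundary = 8192.  The
 * page cache and blocks covering 4096-8191 are converted to unwritten
 * preallocation, while the sub-block edges 1000-4095 and 8192-10999 are
 * zeroed through xfs_iozero().
 */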
1511
1512/*
1513 * xfs_change_file_space()
1514 * This routine allocates or frees disk space for the given file.
1515 * The user specified parameters are checked for alignment and size
1516 * limitations.
1517 *
1518 * RETURNS:
1519 * 0 on success
1520 * errno on error
1521 *
1522 */
1523int
1524xfs_change_file_space(
1525 xfs_inode_t *ip,
1526 int cmd,
1527 xfs_flock64_t *bf,
1528 xfs_off_t offset,
1529 int attr_flags)
1530{
1531 xfs_mount_t *mp = ip->i_mount;
1532 int clrprealloc;
1533 int error;
1534 xfs_fsize_t fsize;
1535 int setprealloc;
1536 xfs_off_t startoffset;
1537 xfs_trans_t *tp;
1538 struct iattr iattr;
1539
1540 if (!S_ISREG(ip->i_d.di_mode))
1541 return XFS_ERROR(EINVAL);
1542
1543 switch (bf->l_whence) {
1544 case 0: /*SEEK_SET*/
1545 break;
1546 case 1: /*SEEK_CUR*/
1547 bf->l_start += offset;
1548 break;
1549 case 2: /*SEEK_END*/
1550 bf->l_start += XFS_ISIZE(ip);
1551 break;
1552 default:
1553 return XFS_ERROR(EINVAL);
1554 }
1555
1556 /*
1557 * length of <= 0 for resv/unresv/zero is invalid. length for
1558 * alloc/free is ignored completely and we have no idea what userspace
1559 * might have set it to, so set it to zero to allow range
1560 * checks to pass.
1561 */
1562 switch (cmd) {
1563 case XFS_IOC_ZERO_RANGE:
1564 case XFS_IOC_RESVSP:
1565 case XFS_IOC_RESVSP64:
1566 case XFS_IOC_UNRESVSP:
1567 case XFS_IOC_UNRESVSP64:
1568 if (bf->l_len <= 0)
1569 return XFS_ERROR(EINVAL);
1570 break;
1571 default:
1572 bf->l_len = 0;
1573 break;
1574 }
1575
1576 if (bf->l_start < 0 ||
1577 bf->l_start > mp->m_super->s_maxbytes ||
1578 bf->l_start + bf->l_len < 0 ||
1579 bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
1580 return XFS_ERROR(EINVAL);
1581
1582 bf->l_whence = 0;
1583
1584 startoffset = bf->l_start;
1585 fsize = XFS_ISIZE(ip);
1586
1587 setprealloc = clrprealloc = 0;
1588 switch (cmd) {
1589 case XFS_IOC_ZERO_RANGE:
1590 error = xfs_zero_file_space(ip, startoffset, bf->l_len,
1591 attr_flags);
1592 if (error)
1593 return error;
1594 setprealloc = 1;
1595 break;
1596
1597 case XFS_IOC_RESVSP:
1598 case XFS_IOC_RESVSP64:
1599 error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
1600 XFS_BMAPI_PREALLOC, attr_flags);
1601 if (error)
1602 return error;
1603 setprealloc = 1;
1604 break;
1605
1606 case XFS_IOC_UNRESVSP:
1607 case XFS_IOC_UNRESVSP64:
1608 if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
1609 attr_flags)))
1610 return error;
1611 break;
1612
1613 case XFS_IOC_ALLOCSP:
1614 case XFS_IOC_ALLOCSP64:
1615 case XFS_IOC_FREESP:
1616 case XFS_IOC_FREESP64:
1617 /*
1618 * These operations actually do IO when extending the file, but
 1619 * the allocation is done separately from the zeroing that is
1620 * done. This set of operations need to be serialised against
1621 * other IO operations, such as truncate and buffered IO. We
1622 * need to take the IOLOCK here to serialise the allocation and
1623 * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
1624 * truncate, direct IO) from racing against the transient
1625 * allocated but not written state we can have here.
1626 */
1627 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1628 if (startoffset > fsize) {
1629 error = xfs_alloc_file_space(ip, fsize,
1630 startoffset - fsize, 0,
1631 attr_flags | XFS_ATTR_NOLOCK);
1632 if (error) {
1633 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1634 break;
1635 }
1636 }
1637
1638 iattr.ia_valid = ATTR_SIZE;
1639 iattr.ia_size = startoffset;
1640
1641 error = xfs_setattr_size(ip, &iattr,
1642 attr_flags | XFS_ATTR_NOLOCK);
1643 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1644
1645 if (error)
1646 return error;
1647
1648 clrprealloc = 1;
1649 break;
1650
1651 default:
1652 ASSERT(0);
1653 return XFS_ERROR(EINVAL);
1654 }
1655
1656 /*
1657 * update the inode timestamp, mode, and prealloc flag bits
1658 */
1659 tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
1660
1661 if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
1662 0, 0, 0))) {
1663 /* ASSERT(0); */
1664 xfs_trans_cancel(tp, 0);
1665 return error;
1666 }
1667
1668 xfs_ilock(ip, XFS_ILOCK_EXCL);
1669 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1670
1671 if ((attr_flags & XFS_ATTR_DMI) == 0) {
1672 ip->i_d.di_mode &= ~S_ISUID;
1673
1674 /*
1675 * Note that we don't have to worry about mandatory
1676 * file locking being disabled here because we only
1677 * clear the S_ISGID bit if the Group execute bit is
1678 * on, but if it was on then mandatory locking wouldn't
1679 * have been enabled.
1680 */
1681 if (ip->i_d.di_mode & S_IXGRP)
1682 ip->i_d.di_mode &= ~S_ISGID;
1683
1684 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1685 }
1686 if (setprealloc)
1687 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
1688 else if (clrprealloc)
1689 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
1690
1691 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1692 if (attr_flags & XFS_ATTR_SYNC)
1693 xfs_trans_set_sync(tp);
1694 return xfs_trans_commit(tp, 0);
1695}
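/*
 * Userspace view of the interface implemented above (hedged sketch,
 * assuming the struct xfs_flock64 layout exported by xfsprogs; fd is a
 * placeholder).  XFS_IOC_RESVSP64 preallocates space without changing the
 * file size and lands in the XFS_IOC_RESVSP/RESVSP64 case above.
 */
#if 0	/* illustrative userspace snippet, not kernel code */
	struct xfs_flock64 fl = {
		.l_whence = SEEK_SET,		/* l_start is an absolute offset */
		.l_start  = 0,
		.l_len    = 16 * 1024 * 1024,	/* reserve 16MB */
	};

	if (ioctl(fd, XFS_IOC_RESVSP64, &fl) < 0)
		perror("XFS_IOC_RESVSP64");
#endif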
 1696
1697/*
1698 * We need to check that the format of the data fork in the temporary inode is
1699 * valid for the target inode before doing the swap. This is not a problem with
1700 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1701 * data fork depending on the space the attribute fork is taking so we can get
1702 * invalid formats on the target inode.
1703 *
1704 * E.g. target has space for 7 extents in extent format, temp inode only has
1705 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1706 * btree, but when swapped it needs to be in extent format. Hence we can't just
1707 * blindly swap data forks on attr2 filesystems.
1708 *
1709 * Note that we check the swap in both directions so that we don't end up with
1710 * a corrupt temporary inode, either.
1711 *
1712 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1713 * inode will prevent this situation from occurring, so all we do here is
 1714 * reject and log the attempt. Basically we are putting the responsibility on
1715 * userspace to get this right.
1716 */
1717static int
1718xfs_swap_extents_check_format(
1719 xfs_inode_t *ip, /* target inode */
1720 xfs_inode_t *tip) /* tmp inode */
1721{
1722
1723 /* Should never get a local format */
1724 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1725 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1726 return EINVAL;
1727
1728 /*
 1729 * if the target inode has fewer extents than the temporary inode then
1730 * why did userspace call us?
1731 */
1732 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1733 return EINVAL;
1734
1735 /*
1736 * if the target inode is in extent form and the temp inode is in btree
1737 * form then we will end up with the target inode in the wrong format
 1738 * as we already know there are fewer extents in the temp inode.
1739 */
1740 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1741 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1742 return EINVAL;
1743
1744 /* Check temp in extent form to max in target */
1745 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1746 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1747 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1748 return EINVAL;
1749
1750 /* Check target in extent form to max in temp */
1751 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1752 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1753 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1754 return EINVAL;
1755
1756 /*
1757 * If we are in a btree format, check that the temp root block will fit
1758 * in the target and that it has enough extents to be in btree format
1759 * in the target.
1760 *
1761 * Note that we have to be careful to allow btree->extent conversions
1762 * (a common defrag case) which will occur when the temp inode is in
1763 * extent format...
1764 */
1765 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1766 if (XFS_IFORK_BOFF(ip) &&
1767 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1768 return EINVAL;
1769 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1770 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1771 return EINVAL;
1772 }
1773
1774 /* Reciprocal target->temp btree format checks */
1775 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1776 if (XFS_IFORK_BOFF(tip) &&
1777 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1778 return EINVAL;
1779 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1780 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1781 return EINVAL;
1782 }
1783
1784 return 0;
1785}
1786
1787int
1788xfs_swap_extents(
1789 xfs_inode_t *ip, /* target inode */
1790 xfs_inode_t *tip, /* tmp inode */
1791 xfs_swapext_t *sxp)
1792{
1793 xfs_mount_t *mp = ip->i_mount;
1794 xfs_trans_t *tp;
1795 xfs_bstat_t *sbp = &sxp->sx_stat;
1796 xfs_ifork_t *tempifp, *ifp, *tifp;
1797 int src_log_flags, target_log_flags;
1798 int error = 0;
1799 int aforkblks = 0;
1800 int taforkblks = 0;
1801 __uint64_t tmp;
1802
1803 /*
1804 * We have no way of updating owner information in the BMBT blocks for
 1805 * each inode on CRC enabled filesystems, so to avoid corrupting this
 1806 * metadata we simply don't allow extent swaps to occur.
1807 */
1808 if (xfs_sb_version_hascrc(&mp->m_sb))
1809 return XFS_ERROR(EINVAL);
1810
1811 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1812 if (!tempifp) {
1813 error = XFS_ERROR(ENOMEM);
1814 goto out;
1815 }
1816
1817 /*
1818 * we have to do two separate lock calls here to keep lockdep
1819 * happy. If we try to get all the locks in one call, lock will
1820 * report false positives when we drop the ILOCK and regain them
1821 * below.
1822 */
1823 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1824 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1825
1826 /* Verify that both files have the same format */
1827 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1828 error = XFS_ERROR(EINVAL);
1829 goto out_unlock;
1830 }
1831
1832 /* Verify both files are either real-time or non-realtime */
1833 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1834 error = XFS_ERROR(EINVAL);
1835 goto out_unlock;
1836 }
1837
1838 error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
1839 if (error)
1840 goto out_unlock;
1841 truncate_pagecache_range(VFS_I(tip), 0, -1);
1842
1843 /* Verify O_DIRECT for ftmp */
1844 if (VN_CACHED(VFS_I(tip)) != 0) {
1845 error = XFS_ERROR(EINVAL);
1846 goto out_unlock;
1847 }
1848
1849 /* Verify all data are being swapped */
1850 if (sxp->sx_offset != 0 ||
1851 sxp->sx_length != ip->i_d.di_size ||
1852 sxp->sx_length != tip->i_d.di_size) {
1853 error = XFS_ERROR(EFAULT);
1854 goto out_unlock;
1855 }
1856
1857 trace_xfs_swap_extent_before(ip, 0);
1858 trace_xfs_swap_extent_before(tip, 1);
1859
1860 /* check inode formats now that data is flushed */
1861 error = xfs_swap_extents_check_format(ip, tip);
1862 if (error) {
1863 xfs_notice(mp,
1864 "%s: inode 0x%llx format is incompatible for exchanging.",
1865 __func__, ip->i_ino);
1866 goto out_unlock;
1867 }
1868
1869 /*
1870 * Compare the current change & modify times with that
1871 * passed in. If they differ, we abort this swap.
1872 * This is the mechanism used to ensure the calling
1873 * process that the file was not changed out from
1874 * under it.
1875 */
1876 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1877 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1878 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1879 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1880 error = XFS_ERROR(EBUSY);
1881 goto out_unlock;
1882 }
1883
1884 /* We need to fail if the file is memory mapped. Once we have tossed
1885 * all existing pages, the page fault will have no option
1886 * but to go to the filesystem for pages. By making the page fault call
1887 * vop_read (or write in the case of autogrow) they block on the iolock
1888 * until we have switched the extents.
1889 */
1890 if (VN_MAPPED(VFS_I(ip))) {
1891 error = XFS_ERROR(EBUSY);
1892 goto out_unlock;
1893 }
1894
1895 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1896 xfs_iunlock(tip, XFS_ILOCK_EXCL);
1897
1898 /*
1899 * There is a race condition here since we gave up the
1900 * ilock. However, the data fork will not change since
1901 * we have the iolock (locked for truncation too) so we
1902 * are safe. We don't really care if non-io related
1903 * fields change.
1904 */
1905 truncate_pagecache_range(VFS_I(ip), 0, -1);
1906
1907 tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1908 if ((error = xfs_trans_reserve(tp, 0,
1909 XFS_ICHANGE_LOG_RES(mp), 0,
1910 0, 0))) {
1911 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1912 xfs_iunlock(tip, XFS_IOLOCK_EXCL);
1913 xfs_trans_cancel(tp, 0);
1914 goto out;
1915 }
1916 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1917
1918 /*
1919 * Count the number of extended attribute blocks
1920 */
1921 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1922 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1923 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1924 if (error)
1925 goto out_trans_cancel;
1926 }
1927 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1928 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1929 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1930 &taforkblks);
1931 if (error)
1932 goto out_trans_cancel;
1933 }
1934
1935 /*
1936 * Swap the data forks of the inodes
1937 */
1938 ifp = &ip->i_df;
1939 tifp = &tip->i_df;
1940 *tempifp = *ifp; /* struct copy */
1941 *ifp = *tifp; /* struct copy */
1942 *tifp = *tempifp; /* struct copy */
1943
1944 /*
1945 * Fix the on-disk inode values
1946 */
1947 tmp = (__uint64_t)ip->i_d.di_nblocks;
1948 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1949 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1950
1951 tmp = (__uint64_t) ip->i_d.di_nextents;
1952 ip->i_d.di_nextents = tip->i_d.di_nextents;
1953 tip->i_d.di_nextents = tmp;
1954
1955 tmp = (__uint64_t) ip->i_d.di_format;
1956 ip->i_d.di_format = tip->i_d.di_format;
1957 tip->i_d.di_format = tmp;
1958
1959 /*
1960 * The extents in the source inode could still contain speculative
1961 * preallocation beyond EOF (e.g. the file is open but not modified
1962 * while defrag is in progress). In that case, we need to copy over the
1963 * number of delalloc blocks the data fork in the source inode is
1964 * tracking beyond EOF so that when the fork is truncated away when the
1965 * temporary inode is unlinked we don't underrun the i_delayed_blks
1966 * counter on that inode.
1967 */
1968 ASSERT(tip->i_delayed_blks == 0);
1969 tip->i_delayed_blks = ip->i_delayed_blks;
1970 ip->i_delayed_blks = 0;
1971
1972 src_log_flags = XFS_ILOG_CORE;
1973 switch (ip->i_d.di_format) {
1974 case XFS_DINODE_FMT_EXTENTS:
1975 /* If the extents fit in the inode, fix the
1976 * pointer. Otherwise it's already NULL or
1977 * pointing to the extent.
1978 */
1979 if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1980 ifp->if_u1.if_extents =
1981 ifp->if_u2.if_inline_ext;
1982 }
1983 src_log_flags |= XFS_ILOG_DEXT;
1984 break;
1985 case XFS_DINODE_FMT_BTREE:
1986 src_log_flags |= XFS_ILOG_DBROOT;
1987 break;
1988 }
1989
1990 target_log_flags = XFS_ILOG_CORE;
1991 switch (tip->i_d.di_format) {
1992 case XFS_DINODE_FMT_EXTENTS:
1993 /* If the extents fit in the inode, fix the
1994 * pointer. Otherwise it's already NULL or
1995 * pointing to the extent.
1996 */
1997 if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1998 tifp->if_u1.if_extents =
1999 tifp->if_u2.if_inline_ext;
2000 }
2001 target_log_flags |= XFS_ILOG_DEXT;
2002 break;
2003 case XFS_DINODE_FMT_BTREE:
2004 target_log_flags |= XFS_ILOG_DBROOT;
2005 break;
2006 }
2007
2008
2009 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
2010 xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
2011
2012 xfs_trans_log_inode(tp, ip, src_log_flags);
2013 xfs_trans_log_inode(tp, tip, target_log_flags);
2014
2015 /*
2016 * If this is a synchronous mount, make sure that the
2017 * transaction goes to disk before returning to the user.
2018 */
2019 if (mp->m_flags & XFS_MOUNT_WSYNC)
2020 xfs_trans_set_sync(tp);
2021
2022 error = xfs_trans_commit(tp, 0);
2023
2024 trace_xfs_swap_extent_after(ip, 0);
2025 trace_xfs_swap_extent_after(tip, 1);
2026out:
2027 kmem_free(tempifp);
2028 return error;
2029
2030out_unlock:
2031 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
2032 xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
2033 goto out;
2034
2035out_trans_cancel:
2036 xfs_trans_cancel(tp, 0);
2037 goto out_unlock;
2038}
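/*
 * Userspace view of the extent swap implemented above (hedged sketch,
 * assuming the struct xfs_swapext layout exported to userspace; this is the
 * interface xfs_fsr uses for online defragmentation).  target_fd, tmp_fd
 * and target_stat are placeholders; sx_stat must carry the target's
 * bulkstat data so the ctime/mtime comparison above can detect concurrent
 * modification, and sx_length must equal the current file size.
 */
#if 0	/* illustrative userspace snippet, not kernel code */
	struct xfs_swapext sx = { 0 };

	sx.sx_version  = XFS_SX_VERSION;
	sx.sx_fdtarget = target_fd;		/* file being defragmented */
	sx.sx_fdtmp    = tmp_fd;		/* donor file built by xfs_fsr */
	sx.sx_offset   = 0;			/* whole file must be swapped */
	sx.sx_length   = target_stat.bs_size;
	sx.sx_stat     = target_stat;		/* struct xfs_bstat of target */

	if (ioctl(target_fd, XFS_IOC_SWAPEXT, &sx) < 0)
		perror("XFS_IOC_SWAPEXT");
#endif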