/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_dinode.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

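/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use xfs_fsb_to_db() to turn a mapped extent's start block into a disk
 * address before issuing buffer I/O.  The helper hides the fact that
 * realtime block numbers are device-relative while regular file block
 * numbers encode an AG number.  xfs_zero_remaining_bytes() below uses
 * exactly this pattern via XFS_BUF_SET_ADDR().  The name is hypothetical.
 */
#if 0
static xfs_daddr_t
example_extent_start_daddr(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec)	/* a mapping from xfs_bmapi_read() */
{
	/* only real, allocated blocks have a disk address */
	ASSERT(irec->br_startblock != HOLESTARTBLOCK &&
	       irec->br_startblock != DELAYSTARTBLOCK);
	return xfs_fsb_to_db(ip, irec->br_startblock);
}
#endif
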
/*
 * Routine to be called at transaction's end by xfs_bmapi() and
 * xfs_bunmapi() callers.  Frees all the extents that need freeing, which
 * must be done last due to locking considerations.  We never free any
 * extents in the first transaction.
 *
 * Returns 1 in the committed parameter if the given transaction was
 * committed and a new one started, and 0 otherwise.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	struct xfs_trans_res	tres;		/* new log reservation */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	tres.tr_logres = ntp->t_log_res;
	tres.tr_logcount = ntp->t_log_count;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	error = xfs_trans_reserve(ntp, &tres, 0, 0);
	if (error)
		return error;
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}

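/*
 * Illustrative sketch, not part of the original file: the allocate/
 * finish/commit pattern xfs_bmap_finish() assumes its callers follow
 * (compare xfs_alloc_file_space() and xfs_free_file_space() below).
 * The surrounding transaction setup is elided and the variable names
 * are hypothetical.
 */
#if 0
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		firstfsb;
	int			committed;

	xfs_bmap_init(&free_list, &firstfsb);
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, flags,
				&firstfsb, resblks, imapp, &nimaps,
				&free_list);
	if (!error)	/* process any extents queued for freeing */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				     XFS_TRANS_ABORT);
	} else {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	}
#endif
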
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}

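/*
 * Worked example for the unit conversions above (assumed numbers, not
 * from the original file): with sb_rextsize = 16 filesystem blocks and
 * an extent size hint of align = 32 blocks, prod = 32 / 16 = 2, so the
 * realtime allocator is asked for extents in multiples of 2 rtextents.
 * An aligned 80-block request becomes ralen = 80 / 16 = 5 rtextents,
 * and on success rtb and ralen are scaled back up by sb_rextsize
 * before being stored in ap->blkno and ap->length.
 */
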
/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int				/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	return 0;
}

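/*
 * Illustrative sketch, not part of the original file: counting the
 * attribute fork blocks of an inode, as xfs_swap_extents() does below
 * when fixing up di_nblocks.  Note that the result is accumulated into
 * *count, so callers must zero it first.
 */
#if 0
	int	aforkblks = 0;

	error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
	if (error)
		goto out_trans_cancel;
#endif
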
/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return XFS_ERROR(EINVAL);
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return XFS_ERROR(EINVAL);

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return XFS_ERROR(EINVAL);
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return XFS_ERROR(EINVAL);
	bmvend = bmv->bmv_offset + bmv->bmv_length;


	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return XFS_ERROR(ENOMEM);
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return XFS_ERROR(ENOMEM);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}

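/*
 * Illustrative sketch, not part of the original file: a minimal
 * xfs_bmap_format_t callback that copies each getbmapx record to a user
 * buffer, in the style a GETBMAPX ioctl handler might use.  The
 * cursor-through-void** convention is assumed from the formatter call
 * above; the function name is hypothetical.
 */
#if 0
STATIC int
example_getbmapx_format(
	void			**ap,
	struct getbmapx		*bmv,
	int			*full)
{
	struct getbmapx __user	**base = (struct getbmapx __user **)ap;

	if (copy_to_user(*base, bmv, sizeof(struct getbmapx)))
		return XFS_ERROR(EFAULT);
	(*base)++;	/* advance the user buffer cursor */
	return 0;	/* leave *full clear so the loop continues */
}
#endif
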
/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent, and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

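/*
 * Illustrative sketch, not part of the original file: the check/free
 * pairing a release path is assumed to use, trimming speculative
 * preallocation only when the gate above says it is worthwhile.
 * EAGAIN (iolock contention, see xfs_free_eofblocks() below) is treated
 * as non-fatal.
 */
#if 0
	if (xfs_can_free_eofblocks(ip, false)) {
		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != EAGAIN)
			return error;
	}
#endif
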
/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp, 0);
				return EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return XFS_ERROR(EINVAL);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error) {
			goto error0;
		}

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error) {
			break;
		}

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = XFS_ERROR(ENOSPC);
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_free(bp);
	return error;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = offset & ~(rounding - 1);
	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					      ioffset, -1);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, -1);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error) {
			goto error0;
		}

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}

int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			granularity;
	xfs_off_t		start_boundary;
	xfs_off_t		end_boundary;
	int			error;

	trace_xfs_zero_file_space(ip);

	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

	/*
	 * Round the range of extents we are going to convert inwards.  If the
	 * offset is aligned, then it doesn't get changed so we zero from the
	 * start of the block offset points to.
	 */
	start_boundary = round_up(offset, granularity);
	end_boundary = round_down(offset + len, granularity);

	ASSERT(start_boundary >= offset);
	ASSERT(end_boundary <= offset + len);

	if (start_boundary < end_boundary - 1) {
		/*
		 * punch out delayed allocation blocks and the page cache over
		 * the conversion range
		 */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip,
				XFS_B_TO_FSBT(mp, start_boundary),
				XFS_B_TO_FSB(mp, end_boundary - start_boundary));
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		truncate_pagecache_range(VFS_I(ip), start_boundary,
					 end_boundary - 1);

		/* convert the blocks */
		error = xfs_alloc_file_space(ip, start_boundary,
					end_boundary - start_boundary - 1,
					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
		if (error)
			goto out;

		/* We've handled the interior of the range, now for the edges */
		if (start_boundary != offset) {
			error = xfs_iozero(ip, offset, start_boundary - offset);
			if (error)
				goto out;
		}

		if (end_boundary != offset + len)
			error = xfs_iozero(ip, end_boundary,
					   offset + len - end_boundary);

	} else {
		/*
		 * It's either a sub-granularity range or the range spanned lies
		 * partially across two adjacent blocks.
		 */
		error = xfs_iozero(ip, offset, len);
	}

out:
	return error;
}

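/*
 * Worked example for the boundary rounding above (assumed numbers, not
 * from the original file): with 4096 byte blocks and pages, granularity
 * is 4096.  A call with offset = 1000 and len = 8192 gives
 * start_boundary = round_up(1000, 4096) = 4096 and end_boundary =
 * round_down(9192, 4096) = 8192, so blocks [4096, 8192) are converted
 * to unwritten extents while the sub-block edges [1000, 4096) and
 * [8192, 9192) are zeroed through the page cache by xfs_iozero().
 */
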
/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * The first thing we do is free the data blocks in the specified range
 * by calling xfs_free_file_space(), which also syncs dirty data and
 * invalidates the page cache over the region the collapse range is
 * working on.  We then shift the extent records to the left to cover
 * the resulting hole.
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_extnum_t		current_ext = 0;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	start_fsb = XFS_B_TO_FSB(mp, offset + len);
	shift_fsb = XFS_B_TO_FSB(mp, len);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We need to reserve a permanent block for the transaction:
		 * after shifting an extent into the hole we may find that
		 * adjacent extents can be merged, which can free a block
		 * during the record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
					       shift_fsb, &current_ext,
					       &first_block, &free_list,
					       XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return EINVAL;

	/*
	 * if the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return EINVAL;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return EINVAL;
	}

	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = XFS_ERROR(ENOMEM);
		goto out;
	}

	/*
	 * we have to do two separate lock calls here to keep lockdep
	 * happy. If we try to get all the locks in one call, lockdep will
	 * report false positives when we drop the ILOCK and regain them
	 * below.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
	if (error)
		goto out_unlock;
	truncate_pagecache_range(VFS_I(tip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VN_CACHED(VFS_I(tip)) != 0) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = XFS_ERROR(EFAULT);
		goto out_unlock;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_unlock;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	/*
	 * We need to fail if the file is memory mapped.  Once we have tossed
	 * all existing pages, the page fault will have no option
	 * but to go to the filesystem for pages. By making the page fault call
	 * vop_read (or write in the case of autogrow) they block on the iolock
	 * until we have switched the extents.
	 */
	if (VN_MAPPED(VFS_I(ip))) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);

	/*
	 * There is a race condition here since we gave up the
	 * ilock.  However, the data fork will not change since
	 * we have the iolock (locked for truncation too) so we
	 * are safe.  We don't really care if non-io related
	 * fields change.
	 */
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		goto out;
	}
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/*
	 * Count the number of extended attribute blocks
	 */
	if ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0) &&
	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0) &&
	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Before we've swapped the forks, lets set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	goto out_unlock;
}