/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_dinode.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to be called at transaction's end by the xfs_bmapi() and
 * xfs_bunmapi() callers.  Frees all the extents that need freeing, which
 * must be done last due to locking considerations.  We never free any
 * extents in the first transaction.
 *
 * Through the committed parameter, return 1 if the given transaction was
 * committed and a new one started, and 0 otherwise.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	struct xfs_trans_res	tres;		/* new log reservation */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	tres.tr_logres = ntp->t_log_res;
	tres.tr_logcount = ntp->t_log_count;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	error = xfs_trans_reserve(ntp, &tres, 0, 0);
	if (error)
		return error;
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == -EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}

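/*
 * Allocate file blocks on the realtime device for a bmapi allocation request:
 * align the request to the realtime extent size, pick a starting realtime
 * extent, and update the inode block count and quota accounting once the
 * allocation succeeds.
 */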
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_bmapi_allocate_worker(
	struct work_struct	*work)
{
	struct xfs_bmalloca	*args = container_of(work,
						struct xfs_bmalloca, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_FSTRANS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_bmapi_allocate(args);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}

/*
 * Some allocation requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. Otherwise just
 * call directly to avoid the context switch overhead here.
 */
int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!args->stack_switch)
		return __xfs_bmapi_allocate(args);

	args->done = &done;
	args->kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
	queue_work(xfs_alloc_wq, &args->work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args->work);
	return args->result;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in that
 * case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int			b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  Walks a block at a time so will be slow, but is only executed
 * in rare error cases so the overhead is not critical.  This will always
 * punch out both the start and end blocks, even if the ranges only partially
 * overlap them, so it is up to the caller to ensure that partial blocks are
 * not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp, 0);
				return -EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

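/*
 * Allocate file space from offset for len bytes.  Space is reserved and
 * mapped one transaction at a time, bounded by the transaction block
 * reservation, until the whole range has been allocated or an error occurs.
 */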
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error) {
			goto error0;
		}

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error) {
			break;
		}

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return -ENOMEM;

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = -EIO;
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = -EIO;
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_free(bp);
	return error;
}

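/*
 * Free file space from offset for len bytes.  The unaligned edges of the
 * range are zeroed on disk rather than freed, and the block aligned middle
 * is unmapped one transaction at a time until the whole range is done.
 */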
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = offset & ~(rounding - 1);
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					      ioffset, -1);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, -1);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error) {
			goto error0;
		}

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}

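/*
 * Zero file space from offset for len bytes without changing the file size.
 * Whole blocks in the range are punched out and reallocated as unwritten
 * extents; the unaligned edges are zeroed through the page cache.
 */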
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			granularity;
	xfs_off_t		start_boundary;
	xfs_off_t		end_boundary;
	int			error;

	trace_xfs_zero_file_space(ip);

	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

	/*
	 * Round the range of extents we are going to convert inwards.  If the
	 * offset is aligned, then it doesn't get changed so we zero from the
	 * start of the block offset points to.
	 */
	start_boundary = round_up(offset, granularity);
	end_boundary = round_down(offset + len, granularity);

	ASSERT(start_boundary >= offset);
	ASSERT(end_boundary <= offset + len);

	if (start_boundary < end_boundary - 1) {
		/*
		 * punch out delayed allocation blocks and the page cache over
		 * the conversion range
		 */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip,
				XFS_B_TO_FSBT(mp, start_boundary),
				XFS_B_TO_FSB(mp, end_boundary - start_boundary));
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		truncate_pagecache_range(VFS_I(ip), start_boundary,
					 end_boundary - 1);

		/* convert the blocks */
		error = xfs_alloc_file_space(ip, start_boundary,
					end_boundary - start_boundary - 1,
					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
		if (error)
			goto out;

		/* We've handled the interior of the range, now for the edges */
		if (start_boundary != offset) {
			error = xfs_iozero(ip, offset, start_boundary - offset);
			if (error)
				goto out;
		}

		if (end_boundary != offset + len)
			error = xfs_iozero(ip, end_boundary,
					   offset + len - end_boundary);

	} else {
		/*
		 * It's either a sub-granularity range or the range spanned
		 * lies partially across two adjacent blocks.
		 */
		error = xfs_iozero(ip, offset, len);
	}

out:
	return error;
}

/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * The first thing we do is free the data blocks in the specified range
 * by calling xfs_free_file_space(), which also syncs dirty data and
 * invalidates the page cache over the region the collapse range is
 * working on.  Then the extent records are shifted to the left to cover
 * the hole.
 *
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_extnum_t		current_ext = 0;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	start_fsb = XFS_B_TO_FSB(mp, offset + len);
	shift_fsb = XFS_B_TO_FSB(mp, len);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We need a permanent block reservation for the transaction.
		 * This comes into play when, after shifting an extent into a
		 * hole, we find that adjacent extents can be merged, which
		 * may free a block during the record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
					       shift_fsb, &current_ext,
					       &first_block, &free_list,
					       XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem
 * with attr1 because of the fixed fork offset, but attr2 has a dynamically
 * sized data fork depending on the space the attribute fork is taking, so we
 * can get invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt; basically we are putting the responsibility on
 * userspace to get this right.
 */
1604static int
1605xfs_swap_extents_check_format(
1606 xfs_inode_t *ip, /* target inode */
1607 xfs_inode_t *tip) /* tmp inode */
1608{
1609
1610 /* Should never get a local format */
1611 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1612 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001613 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001614
1615 /*
1616 * if the target inode has less extents that then temporary inode then
1617 * why did userspace call us?
1618 */
1619 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001620 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001621
1622 /*
1623 * if the target inode is in extent form and the temp inode is in btree
1624 * form then we will end up with the target inode in the wrong format
1625 * as we already know there are less extents in the temp inode.
1626 */
1627 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1628 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001629 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001630
1631 /* Check temp in extent form to max in target */
1632 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1633 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1634 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001635 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001636
1637 /* Check target in extent form to max in temp */
1638 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1639 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1640 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001641 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001642
	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * we have to do two separate lock calls here to keep lockdep
	 * happy. If we try to get all the locks in one call, lockdep will
	 * report false positives when we drop the ILOCK and regain them
	 * below.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

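	/*
	 * Write back and invalidate the tmp inode's page cache so that the
	 * format checks below see the final on-disk extent layout.
	 */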
	error = filemap_write_and_wait(VFS_I(tip)->i_mapping);
	if (error)
		goto out_unlock;
	truncate_pagecache_range(VFS_I(tip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VN_CACHED(VFS_I(tip)) != 0) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_unlock;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_unlock;
	}

	/*
	 * Compare the current change & modify times with those
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_unlock;
	}

	/*
	 * We need to fail if the file is memory mapped. Once we have tossed
	 * all existing pages, the page fault will have no option
	 * but to go to the filesystem for pages. By making the page fault call
	 * vop_read (or write in the case of autogrow) it will block on the
	 * iolock until we have switched the extents.
	 */
	if (VN_MAPPED(VFS_I(ip))) {
		error = -EBUSY;
		goto out_unlock;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);

	/*
	 * There is a race condition here since we gave up the
	 * ilock. However, the data fork will not change since
	 * we have the iolock (locked for truncation too) so we
	 * are safe. We don't really care if non-io related
	 * fields change.
	 */
	truncate_pagecache_range(VFS_I(ip), 0, -1);

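	/*
	 * Reserve the transaction before retaking the ILOCKs; log
	 * reservation can block waiting for log space, so it is done
	 * without the inode locks held.
	 */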
	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		goto out;
	}
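	/* Retake the ILOCKs now that the transaction reservation is held. */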
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/*
	 * Count the number of extended attribute blocks
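	 * in each inode. The attr forks are not swapped here, so each inode
	 * keeps its own attr blocks; the di_nblocks fixup after the data
	 * fork swap below accounts for this.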
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

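	/*
	 * Join both inodes to the transaction so that the fork swap and
	 * the inode core updates below are committed atomically.
	 */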
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

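	/*
	 * Fix up the incore extent pointers and set the data fork logging
	 * flags to match each inode's new format.
	 */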
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer. Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer. Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

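	/* Log both inode cores along with the fork flags chosen above. */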
	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	goto out_unlock;
}