/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        return (XFS_IS_REALTIME_INODE(ip) ?
                 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
                 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
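
/*
 * Worked example (not from the original source): with 4096-byte blocks
 * (sb_blocklog == 12), XFS_FSB_TO_BB() shifts by 12 - BBSHIFT = 3, so a
 * realtime fsb of 100 maps to basic block 800. For data device files,
 * XFS_FSB_TO_DADDR() first decodes the AG-encoded sparse block number
 * and then performs the same units conversion.
 */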

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode        *ip,
        xfs_fsblock_t           start_fsb,
        xfs_off_t               count_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_daddr_t             sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t                block = XFS_BB_TO_FSBT(mp, sector);

        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, true);
}
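
/*
 * Worked example: s_blocksize_bits - 9 converts filesystem blocks to the
 * 512-byte sectors that blkdev_issue_zeroout() expects. On a 4096-byte
 * block filesystem (s_blocksize_bits == 12) the shift is 3, so block 100
 * becomes sector 800 and a count of 16 blocks becomes 128 sectors.
 */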

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller. Frees all the extents that need freeing, which must be done
 * last due to locking considerations. We never free any extents in
 * the first transaction.
 *
 * If an inode *ip is provided, rejoin it to the transaction if
 * the transaction was committed.
 */
int                                             /* error */
xfs_bmap_finish(
        struct xfs_trans        **tp,           /* transaction pointer addr */
        struct xfs_bmap_free    *flist,         /* i/o: list extents to free */
        struct xfs_inode        *ip)
{
        struct xfs_efd_log_item *efd;           /* extent free data */
        struct xfs_efi_log_item *efi;           /* extent free intention */
        int                     error;          /* error return value */
        int                     committed;      /* xact committed or not */
        struct xfs_bmap_free_item *free;        /* free extent item */
        struct xfs_bmap_free_item *next;        /* next item on free list */

        ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
        if (flist->xbf_count == 0)
                return 0;

        efi = xfs_trans_get_efi(*tp, flist->xbf_count);
        for (free = flist->xbf_first; free; free = free->xbfi_next)
                xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
                        free->xbfi_blockcount);

        error = __xfs_trans_roll(tp, ip, &committed);
        if (error) {
                /*
                 * If the transaction was committed, drop the EFD reference
                 * since we're bailing out of here. The other reference is
                 * dropped when the EFI hits the AIL.
                 *
                 * If the transaction was not committed, the EFI is freed by the
                 * EFI item unlock handler on abort. Also, we have a new
                 * transaction so we should return committed=1 even though we're
                 * returning an error.
                 */
                if (committed) {
                        xfs_efi_release(efi);
                        xfs_force_shutdown((*tp)->t_mountp,
                                           (error == -EFSCORRUPTED) ?
                                           SHUTDOWN_CORRUPT_INCORE :
                                           SHUTDOWN_META_IO_ERROR);
                }
                return error;
        }

        /*
         * Get an EFD and free each extent in the list, logging to the EFD in
         * the process. The remaining bmap free list is cleaned up by the caller
         * on error.
         */
        efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
        for (free = flist->xbf_first; free != NULL; free = next) {
                next = free->xbfi_next;

                error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
                                              free->xbfi_blockcount);
                if (error)
                        return error;

                xfs_bmap_del_free(flist, NULL, free);
        }

        return 0;
}
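
/*
 * Usage sketch, mirroring the callers later in this file (e.g.
 * xfs_free_file_space()): build a free list with xfs_bunmapi(), then let
 * xfs_bmap_finish() roll the transaction:
 *
 *      xfs_bmap_init(&free_list, &firstfsb);
 *      error = xfs_bunmapi(tp, ip, start_fsb, len_fsb,
 *                          0, 2, &firstfsb, &free_list, &done);
 *      if (!error)
 *              error = xfs_bmap_finish(&tp, &free_list, NULL);
 *      if (error)
 *              xfs_bmap_cancel(&free_list);    (then cancel tp)
 *      else
 *              error = xfs_trans_commit(tp);
 */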

int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        xfs_alloctype_t atype = 0;      /* type for allocation routines */
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        if (do_mod(ap->offset, align) || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
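
        /*
         * For example: MAXEXTLEN is the 21-bit on-disk extent length limit
         * (2097151 blocks), so with sb_rextsize == 16 the clamp above caps
         * ralen at 2097151 / 16 = 131071 realtime extents.
         */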

        /*
         * Lock out modifications to both the RT bitmap and summary inodes.
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx);   /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, atype, ap->wasdel, prod, &rtb)))
                return error;
        if (rtb == NULLFSBLOCK && prod > 1 &&
            (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
                                           ap->length, &ralen, atype,
                                           ap->wasdel, 1, &rtb)))
                return error;
        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

                /* Zero the extent if we were asked to do so */
                if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
                        if (error)
                                return error;
                }
        } else {
                ap->length = 0;
        }
        return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary. All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        int                     numrecs,
        int                     *count)
{
        int             b;

        for (b = 0; b < numrecs; b++) {
                xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
                *count += xfs_bmbt_get_blockcount(frp);
        }
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        int                     *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int                                      /* error */
xfs_bmap_count_tree(
        xfs_mount_t     *mp,            /* file system mount point */
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ifork_t     *ifp,           /* inode fork pointer */
        xfs_fsblock_t   blockno,        /* file system block number */
        int             levelin,        /* level in btree */
        int             *count)         /* Count of blocks */
{
        int                     error;
        xfs_buf_t               *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                if (unlikely((error =
                     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int                                             /* error */
xfs_bmap_count_blocks(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode */
        int                     whichfork,      /* data or attr fork */
        int                     *count)         /* out: count of blocks */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
                xfs_bmap_count_leaves(ifp, 0,
                        ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
                        count);
                return 0;
        }

        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        block = ifp->if_broot;
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
                XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
                                 mp);
                return -EFSCORRUPTED;
        }

        return 0;
}
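
/*
 * Usage sketch (hypothetical caller, signature as above):
 *
 *      int     nblks = 0;
 *
 *      error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK, &nblks);
 *      if (error)
 *              return error;
 *
 * On return *count holds the fork's extent block count, plus the bmbt
 * blocks themselves when the fork is in btree format.
 */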

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
        xfs_inode_t             *ip,            /* xfs incore inode pointer */
        struct getbmapx         *out,           /* output structure */
        int                     prealloced,     /* this is a file with
                                                 * preallocated data space */
        __int64_t               end,            /* last block requested */
        xfs_fsblock_t           startblock)
{
        __int64_t               fixlen;
        xfs_mount_t             *mp;            /* file system mount point */
        xfs_ifork_t             *ifp;           /* inode fork pointer */
        xfs_extnum_t            lastx;          /* last extent pointer */
        xfs_fileoff_t           fileblock;

        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF. Trim it. */
                        if (fixlen <= 0)
                                return 0;
                        out->bmv_length = fixlen;
                }
        } else {
                if (startblock == DELAYSTARTBLOCK)
                        out->bmv_block = -2;
                else
                        out->bmv_block = xfs_fsb_to_db(ip, startblock);
                fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
                ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
                if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
                   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
                        out->bmv_oflags |= BMV_OF_LAST;
        }

        return 1;
}
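
/*
 * Summary of the bmv_block encoding used above: -1 means a hole, -2 means
 * a delalloc extent that has no disk location yet, and anything else is
 * the disk address returned by xfs_fsb_to_db().
 */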

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int                                             /* error code */
xfs_getbmap(
        xfs_inode_t             *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        xfs_bmap_format_t       formatter,      /* format to user */
        void                    *arg)           /* formatter arg */
{
        __int64_t               bmvend;         /* last block requested */
        int                     error = 0;      /* return value */
        __int64_t               fixlen;         /* length for -1 case */
        int                     i;              /* extent number */
        int                     lock;           /* lock state */
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
        int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
        int                     whichfork;      /* data or attr fork */
        int                     prealloced;     /* this is a file with
                                                 * preallocated data space */
        int                     iflags;         /* interface flags */
        int                     bmapi_flags;    /* flags for xfs_bmapi */
        int                     cur_ext = 0;

        mp = ip->i_mount;
        iflags = bmv->bmv_iflags;
        whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

        if (whichfork == XFS_ATTR_FORK) {
                if (XFS_IFORK_Q(ip)) {
                        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
                                return -EINVAL;
                } else if (unlikely(
                           ip->i_d.di_aformat != 0 &&
                           ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
                        XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return -EFSCORRUPTED;
                }

                prealloced = 0;
                fixlen = 1LL << 32;
        } else {
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
                        return -EINVAL;

                if (xfs_get_extsz_hint(ip) ||
                    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) {
                        prealloced = 1;
                        fixlen = mp->m_super->s_maxbytes;
                } else {
                        prealloced = 0;
                        fixlen = XFS_ISIZE(ip);
                }
        }

        if (bmv->bmv_length == -1) {
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
                bmv->bmv_length =
                        max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
        } else if (bmv->bmv_length == 0) {
                bmv->bmv_entries = 0;
                return 0;
        } else if (bmv->bmv_length < 0) {
                return -EINVAL;
        }

        nex = bmv->bmv_count - 1;
        if (nex <= 0)
                return -EINVAL;
        bmvend = bmv->bmv_offset + bmv->bmv_length;

        if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
                return -ENOMEM;
        out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
        if (!out)
                return -ENOMEM;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK) {
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation. These are not removed
                         * until the release function is called or the inode
                         * is inactivated. Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                lock = xfs_ilock_data_map_shared(ip);
        } else {
                lock = xfs_ilock_attr_map_shared(ip);
        }

        /*
         * Don't let nex be bigger than the number of extents
         * we can have assuming alternating holes and real extents.
         */
        if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
                nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

        bmapi_flags = xfs_bmapi_aflag(whichfork);
        if (!(iflags & BMV_IF_PREALLOC))
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        /*
         * Allocate enough space to handle "subnex" maps at a time.
         */
        error = -ENOMEM;
        subnex = 16;
        map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
        if (!map)
                goto out_unlock_ilock;

        bmv->bmv_entries = 0;

        if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
            (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
                error = 0;
                goto out_free_map;
        }

        nexleft = nex;

        do {
                nmap = (nexleft > subnex) ? subnex : nexleft;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
                if (error)
                        goto out_free_map;
                ASSERT(nmap <= subnex);

                for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
                        else if (map[i].br_startblock == DELAYSTARTBLOCK)
                                out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
                        out[cur_ext].bmv_offset =
                                XFS_FSB_TO_BB(mp, map[i].br_startoff);
                        out[cur_ext].bmv_length =
                                XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        out[cur_ext].bmv_unused1 = 0;
                        out[cur_ext].bmv_unused2 = 0;

                        /*
                         * delayed allocation extents that start beyond EOF can
                         * occur due to speculative EOF allocation when the
                         * delalloc extent is larger than the largest freespace
                         * extent at conversion time. These extents cannot be
                         * converted by data writeback, so can exist here even
                         * if we are not supposed to be finding delalloc
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
                            map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);

                        if (map[i].br_startblock == HOLESTARTBLOCK &&
                            whichfork == XFS_ATTR_FORK) {
                                /* came to the end of attribute fork */
                                out[cur_ext].bmv_oflags |= BMV_OF_LAST;
                                goto out_free_map;
                        }

                        if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
                                        prealloced, bmvend,
                                        map[i].br_startblock))
                                goto out_free_map;

                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

                        /*
                         * In case we don't want to return the hole,
                         * don't increase cur_ext so that we can reuse
                         * it in the next loop.
                         */
                        if ((iflags & BMV_IF_NO_HOLES) &&
                            map[i].br_startblock == HOLESTARTBLOCK) {
                                memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
                                continue;
                        }

                        nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
        } while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
        kmem_free(map);
 out_unlock_ilock:
        xfs_iunlock(ip, lock);
 out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        for (i = 0; i < cur_ext; i++) {
                int full = 0;   /* user array is full */

                /* format results & advance arg */
                error = formatter(&arg, &out[i], &full);
                if (error || full)
                        break;
        }

        kmem_free(out);
        return error;
}
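
/*
 * A minimal formatter might look like this (hypothetical sketch; real
 * callers such as the GETBMAP ioctl path supply their own). Each call
 * copies one getbmapx record to the user buffer and advances the cursor
 * passed in through @arg:
 *
 *      STATIC int
 *      example_bmap_format(void **ap, struct getbmapx *bmv, int *full)
 *      {
 *              struct getbmapx __user **up = (struct getbmapx __user **)ap;
 *
 *              if (copy_to_user(*up, bmv, sizeof(*bmv)))
 *                      return -EFAULT;
 *              (*up)++;
 *              return 0;
 *      }
 */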

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This walks a block at a time, so it will be slow, but it is only
 * executed in rare error cases so the overhead is not critical. It will
 * always punch out both the start and end blocks, even if the ranges only
 * partially overlap them, so it is up to the caller to ensure that partial
 * blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        xfs_fileoff_t           remaining = length;
        int                     error = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        do {
                int             done;
                xfs_bmbt_irec_t imap;
                int             nimaps = 1;
                xfs_fsblock_t   firstblock;
                xfs_bmap_free_t flist;

                /*
                 * Map the range first and check that it is a delalloc extent
                 * before trying to unmap the range. Otherwise we will be
                 * trying to remove a real extent (which requires a
                 * transaction) or a hole, which is probably a bad idea...
                 */
                error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
                                       XFS_BMAPI_ENTIRE);

                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "Failed delalloc mapping lookup ino %lld fsb %lld.",
                                                ip->i_ino, start_fsb);
                        }
                        break;
                }
                if (!nimaps) {
                        /* nothing there */
                        goto next_block;
                }
                if (imap.br_startblock != DELAYSTARTBLOCK) {
                        /* been converted, ignore */
                        goto next_block;
                }
                WARN_ON(imap.br_blockcount == 0);

                /*
                 * Note: while we initialise the firstblock/flist pair, they
                 * should never be used because blocks should never be
                 * allocated or freed for a delalloc extent, and hence we
                 * don't need to cancel or finish them after the xfs_bunmapi()
                 * call.
                 */
                xfs_bmap_init(&flist, &firstblock);
                error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
                                        &flist, &done);
                if (error)
                        break;

                ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
                start_fsb++;
                remaining--;
        } while (remaining > 0);

        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}
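
/*
 * Typical pairing, as used by xfs_shift_file_space() later in this file:
 *
 *      if (xfs_can_free_eofblocks(ip, true)) {
 *              error = xfs_free_eofblocks(mp, ip, false);
 *              if (error)
 *                      return error;
 *      }
 */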

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        bool            need_iolock)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_fileoff_t   end_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file. If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip, 0);
                if (error)
                        return error;

                /*
                 * There are blocks after the end of file.
                 * Free them up now by truncating the file to
                 * its current size.
                 */
                if (need_iolock) {
                        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
                                return -EAGAIN;
                }

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        if (need_iolock)
                                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size. If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (ie NULL files bug).
                 */
                error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
                                              XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp);
                } else {
                        error = xfs_trans_commit(tp);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (need_iolock)
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fsblock_t           firstfsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        xfs_bmap_free_t         free_list;
        uint                    qblocks, resblks, resrtextents;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        if ((temp = do_mod(startoffset_fsb, extsz)))
                                e += temp;
                        if ((temp = do_mod(e, extsz)))
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }
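
                /*
                 * For example: with extsz == 16, startoffset_fsb == 10 and
                 * allocatesize_fsb == 20, s rounds down to 0; e starts at 30,
                 * grows by the 10-block start remainder to 40, then rounds up
                 * to 48, so the reservation below covers whole extent-size
                 * chunks.
                 */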

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
                                resrtextents, 0, &tp);

                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                xfs_bmap_init(&free_list, &firstfsb);
                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, &firstfsb,
                                        resblks, imapp, &nimaps, &free_list);
                if (error)
                        goto error0;

                /*
                 * Complete the transaction
                 */
                error = xfs_bmap_finish(&tp, &free_list, NULL);
                if (error)
                        goto error0;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
        xfs_bmap_cancel(&free_list);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
        xfs_inode_t             *ip,
        xfs_off_t               startoff,
        xfs_off_t               endoff)
{
        xfs_bmbt_irec_t         imap;
        xfs_fileoff_t           offset_fsb;
        xfs_off_t               lastoffset;
        xfs_off_t               offset;
        xfs_buf_t               *bp;
        xfs_mount_t             *mp = ip->i_mount;
        int                     nimap;
        int                     error = 0;

        /*
         * Avoid doing I/O beyond eof - it's not necessary
         * since nothing can read beyond eof. The space will
         * be zeroed when the file is extended anyway.
         */
        if (startoff >= XFS_ISIZE(ip))
                return 0;

        if (endoff > XFS_ISIZE(ip))
                endoff = XFS_ISIZE(ip);

        for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
                uint lock_mode;

                offset_fsb = XFS_B_TO_FSBT(mp, offset);
                nimap = 1;

                lock_mode = xfs_ilock_data_map_shared(ip);
                error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
                xfs_iunlock(ip, lock_mode);

                if (error || nimap < 1)
                        break;
                ASSERT(imap.br_blockcount >= 1);
                ASSERT(imap.br_startoff == offset_fsb);
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

                if (imap.br_startblock == HOLESTARTBLOCK ||
                    imap.br_state == XFS_EXT_UNWRITTEN) {
                        /* skip the entire extent */
                        lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
                                                      imap.br_blockcount) - 1;
                        continue;
                }

                lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
                if (lastoffset > endoff)
                        lastoffset = endoff;

                /* DAX can just zero the backing device directly */
                if (IS_DAX(VFS_I(ip))) {
                        error = dax_zero_page_range(VFS_I(ip), offset,
                                                    lastoffset - offset + 1,
                                                    xfs_get_blocks_direct);
                        if (error)
                                return error;
                        continue;
                }

                error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp,
                                xfs_fsb_to_db(ip, imap.br_startblock),
                                BTOBB(mp->m_sb.sb_blocksize),
                                0, &bp, NULL);
                if (error)
                        return error;

                memset(bp->b_addr +
                                (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
                       0, lastoffset - offset + 1);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        return error;
        }
        return error;
}

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        int                     done;
        xfs_fileoff_t           endoffset_fsb;
        int                     error;
        xfs_fsblock_t           firstfsb;
        xfs_bmap_free_t         free_list;
        xfs_bmbt_irec_t         imap;
        xfs_off_t               ioffset;
        xfs_off_t               iendoffset;
        xfs_extlen_t            mod = 0;
        xfs_mount_t             *mp;
        int                     nimap;
        uint                    resblks;
        xfs_off_t               rounding;
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;

        mp = ip->i_mount;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        error = 0;
        if (len <= 0)   /* if nothing being freed */
                return error;
        rt = XFS_IS_REALTIME_INODE(ip);
        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(VFS_I(ip));

        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        ioffset = round_down(offset, rounding);
        iendoffset = round_up(offset + len, rounding) - 1;
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
                                             iendoffset);
        if (error)
                goto out;
        truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);

        /*
         * Need to zero the stuff we're not freeing, on disk.
         * If it's a realtime file & can't use unwritten extents then we
         * actually need to zero the extent edges.  Otherwise xfs_bunmapi
         * will take care of it for us.
         */
        if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
                nimap = 1;
                error = xfs_bmapi_read(ip, startoffset_fsb, 1,
                                        &imap, &nimap, 0);
                if (error)
                        goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        xfs_daddr_t     block;

                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                        block = imap.br_startblock;
                        mod = do_div(block, mp->m_sb.sb_rextsize);
                        if (mod)
                                startoffset_fsb += mp->m_sb.sb_rextsize - mod;
                }
                nimap = 1;
                error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
                                        &imap, &nimap, 0);
                if (error)
                        goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                        mod++;
                        if (mod && (mod != mp->m_sb.sb_rextsize))
                                endoffset_fsb -= mod;
                }
        }
        if ((done = (endoffset_fsb <= startoffset_fsb)))
                /*
                 * One contiguous piece to clear
                 */
                error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
        else {
                /*
                 * Some full blocks, possibly two pieces to clear
                 */
                if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
                        error = xfs_zero_remaining_bytes(ip, offset,
                                XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
                if (!error &&
                    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
                        error = xfs_zero_remaining_bytes(ip,
                                XFS_FSB_TO_B(mp, endoffset_fsb),
                                offset + len - 1);
        }

        /*
         * free file space until done or until there is an error
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        while (!error && !done) {

                /*
                 * allocate and setup the transaction. Allow this
                 * transaction to dip into the reserve blocks to ensure
                 * the freeing of the space succeeds at ENOSPC.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp,
                                ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
                                resblks, 0, XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * issue the bunmapi() call to free the blocks
                 */
                xfs_bmap_init(&free_list, &firstfsb);
                error = xfs_bunmapi(tp, ip, startoffset_fsb,
                                  endoffset_fsb - startoffset_fsb,
                                  0, 2, &firstfsb, &free_list, &done);
                if (error)
                        goto error0;

                /*
                 * complete the transaction
                 */
                error = xfs_bmap_finish(&tp, &free_list, NULL);
                if (error)
                        goto error0;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

 out:
        return error;

 error0:
        xfs_bmap_cancel(&free_list);
 error1:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        goto out;
}
1348
Brian Foster5d11fb42014-10-30 10:35:11 +11001349/*
1350 * Preallocate and zero a range of a file. This mechanism has the allocation
1351 * semantics of fallocate and in addition converts data in the range to zeroes.
1352 */
Christoph Hellwig865e9442013-10-12 00:55:08 -07001353int
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001354xfs_zero_file_space(
1355 struct xfs_inode *ip,
1356 xfs_off_t offset,
Christoph Hellwig5f8aca82013-10-12 00:55:06 -07001357 xfs_off_t len)
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001358{
1359 struct xfs_mount *mp = ip->i_mount;
Brian Foster5d11fb42014-10-30 10:35:11 +11001360 uint blksize;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001361 int error;
1362
Dave Chinner897b73b2014-04-14 18:15:11 +10001363 trace_xfs_zero_file_space(ip);
1364
Brian Foster5d11fb42014-10-30 10:35:11 +11001365 blksize = 1 << mp->m_sb.sb_blocklog;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001366
1367 /*
Brian Foster5d11fb42014-10-30 10:35:11 +11001368 * Punch a hole and prealloc the range. We use hole punch rather than
1369 * unwritten extent conversion for two reasons:
1370 *
1371 * 1.) Hole punch handles partial block zeroing for us.
1372 *
1373 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1374 * by virtue of the hole punch.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001375 */
Brian Foster5d11fb42014-10-30 10:35:11 +11001376 error = xfs_free_file_space(ip, offset, len);
1377 if (error)
1378 goto out;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001379
Brian Foster5d11fb42014-10-30 10:35:11 +11001380 error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1381 round_up(offset + len, blksize) -
1382 round_down(offset, blksize),
1383 XFS_BMAPI_PREALLOC);
Christoph Hellwig5f8aca82013-10-12 00:55:06 -07001384out:
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001385 return error;
1386
1387}
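
/*
 * For example: with 4096-byte blocks, zeroing offset 5000, length 3000
 * punches bytes 5000-7999 (the partial blocks are zeroed on disk by the
 * hole punch) and then preallocates the covering block-aligned range
 * [4096, 8192).
 */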

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till the last extent.
 * If we are shifting right, we will start with the last extent inside file
 * space and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        enum shift_direction    direction)
{
        int                     done = 0;
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_bmap_free    free_list;
        xfs_fsblock_t           first_block;
        xfs_fileoff_t           stop_fsb;
        xfs_fileoff_t           next_fsb;
        xfs_fileoff_t           shift_fsb;

        ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

        if (direction == SHIFT_LEFT) {
                next_fsb = XFS_B_TO_FSB(mp, offset + len);
                stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
        } else {
                /*
                 * For a right shift, delegate the initialization of next_fsb
                 * to xfs_bmap_shift_extents(), as it runs with the ilock held.
                 */
                next_fsb = NULLFSBLOCK;
                stop_fsb = XFS_B_TO_FSB(mp, offset);
        }

        shift_fsb = XFS_B_TO_FSB(mp, len);

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(mp, ip, false);
                if (error)
                        return error;
        }

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                             offset, -1);
        if (error)
                return error;
        error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
                                        offset >> PAGE_SHIFT, -1);
        if (error)
                return error;

        /*
         * The extent shifting code works on extent granularity. So, if
         * stop_fsb is not the starting block of an extent, we need to split
         * the extent at stop_fsb.
         */
        if (direction == SHIFT_RIGHT) {
                error = xfs_bmap_split_extent(ip, stop_fsb);
                if (error)
                        return error;
        }

        while (!error && !done) {
                /*
                 * We need to reserve a permanent block count for the
                 * transaction: after shifting an extent into a hole we may
                 * find that adjacent extents can be merged, which can free
                 * a block during the record update.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
                                        XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot,
                                XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_trans_cancel;

                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

                xfs_bmap_init(&free_list, &first_block);

                /*
                 * We are using the write transaction in which max 2 bmbt
                 * updates are allowed
                 */
                error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb, &first_block, &free_list,
                                direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_bmap_finish(&tp, &free_list, NULL);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_bmap_cancel:
        xfs_bmap_cancel(&free_list);
out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}
1513
1514/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001515 * xfs_collapse_file_space()
1516 * This routine frees disk space and shift extent for the given file.
1517 * The first thing we do is to free data blocks in the specified range
1518 * by calling xfs_free_file_space(). It would also sync dirty data
1519 * and invalidate page cache over the region on which collapse range
1520 * is working. And Shift extent records to the left to cover a hole.
1521 * RETURNS:
1522 * 0 on success
1523 * errno on error
1524 *
1525 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
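
/*
 * From userspace this path is normally reached via fallocate(2) with
 * FALLOC_FL_COLLAPSE_RANGE. A minimal sketch of such a caller (userspace
 * code, not part of this file; offset and length must be block aligned,
 * and error reporting is trimmed):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int collapse(const char *path, off_t off, off_t len)
 *	{
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		// Removes [off, off + len) and shifts the tail left.
 *		if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */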

/*
 * xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
 * The first thing we do is sync dirty data and invalidate the page cache
 * over the region the insert range is working on, then split an extent
 * into two at the given offset by calling xfs_bmap_split_extent(). All
 * extent records lying between [offset, last allocated extent] are then
 * shifted right to make room for the hole.
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
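
/*
 * The matching userspace entry point is fallocate(2) with
 * FALLOC_FL_INSERT_RANGE; the call shape is identical to the collapse
 * sketch above with the mode flag swapped:
 *
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
 *
 * The file grows by len, and existing data from off onwards moves right.
 */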

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking, so we can get
 * invalid formats on the target inode.
 *
 * E.g. the target has space for 7 extents in extent format, but the temp inode
 * only has space for 6. If we defragment down to 7 extents, then the tmp
 * format is a btree, but when swapped it needs to be in extent format. Hence
 * we can't just blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If the target inode is in extent form and the temp inode is in
	 * btree form, then we will end up with the target inode in the wrong
	 * format, as we already know there are fewer extents in the temp
	 * inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
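
/*
 * Worked instance of the checks above, using the numbers from the header
 * comment (values are made up for the example): suppose the target can
 * hold XFS_IFORK_MAXEXT() == 7 extents inline and the temp inode only 6.
 * If defrag produces 7 extents, the temp fork is a btree (7 > 6) whose
 * contents would have to convert to extent format in the target; the
 * XFS_IFORK_NEXTENTS(tip) <= XFS_IFORK_MAXEXT(ip) test catches exactly
 * this case (7 <= 7) and rejects the swap with -EINVAL.
 */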

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp: no page cache pages may remain */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;
	int		lock_flags;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can safely ensure the inodes are flushed and
	 * have no page cache. Once we have done this we can take the ilocks
	 * and do the rest of the checks.
	 */
	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with those passed in.
	 * If they differ, we abort this swap. This is the mechanism used to
	 * assure the calling process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}
	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
						&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap
	 * the inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.
		 * Otherwise it's already NULL or pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.
		 * Otherwise it's already NULL or pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out;
}
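
/*
 * xfs_swap_extents() is driven from userspace by xfs_fsr through the
 * XFS_IOC_SWAPEXT ioctl. A minimal sketch of such a caller (userspace
 * code, not part of this file; field names follow struct xfs_swapext,
 * and the bulkstat step that fills sx_stat is elided):
 *
 *	#include <sys/ioctl.h>
 *	#include <xfs/xfs.h>
 *
 *	int swapext(int fd_target, int fd_tmp, off_t length,
 *		    struct xfs_bstat *bstat_of_target)
 *	{
 *		struct xfs_swapext sx = {
 *			.sx_version	= XFS_SX_VERSION,
 *			.sx_fdtarget	= fd_target,
 *			.sx_fdtmp	= fd_tmp,
 *			.sx_offset	= 0,		// whole file only
 *			.sx_length	= length,
 *			.sx_stat	= *bstat_of_target,
 *		};
 *
 *		// Fails with EBUSY if the target changed since bulkstat,
 *		// matching the ctime/mtime check above.
 *		return ioctl(fd_target, XFS_IOC_SWAPEXT, &sx);
 *	}
 */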