/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
	ssize_t			size = XFS_FSB_TO_B(mp, count_fsb);

	if (IS_DAX(VFS_I(ip)))
		return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
				sector, size);

	/*
	 * let the block layer decide on the fastest method of
	 * implementing the zeroing.
	 */
	return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);

}

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * If an inode *ip is provided, rejoin it to the transaction if
 * the transaction was committed.
 */
int						/* error */
xfs_bmap_finish(
	struct xfs_trans	**tp,		/* transaction pointer addr */
	struct xfs_bmap_free	*flist,		/* i/o: list extents to free */
	struct xfs_inode	*ip)
{
	struct xfs_efd_log_item	*efd;		/* extent free data */
	struct xfs_efi_log_item	*efi;		/* extent free intention */
	int			error;		/* error return value */
	int			committed;	/* xact committed or not */
	struct xfs_bmap_free_item *free;	/* free extent item */
	struct xfs_bmap_free_item *next;	/* next item on free list */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0)
		return 0;

	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	error = __xfs_trans_roll(tp, ip, &committed);
	if (error) {
		/*
		 * If the transaction was committed, drop the EFD reference
		 * since we're bailing out of here. The other reference is
		 * dropped when the EFI hits the AIL.
		 *
		 * If the transaction was not committed, the EFI is freed by the
		 * EFI item unlock handler on abort. Also, we have a new
		 * transaction so we should return committed=1 even though we're
		 * returning an error.
		 */
		if (committed) {
			xfs_efi_release(efi);
			xfs_force_shutdown((*tp)->t_mountp,
					   (error == -EFSCORRUPTED) ?
					   SHUTDOWN_CORRUPT_INCORE :
					   SHUTDOWN_META_IO_ERROR);
		}
		return error;
	}

	/*
	 * Get an EFD and free each extent in the list, logging to the EFD in
	 * the process. The remaining bmap free list is cleaned up by the caller
	 * on error.
	 */
	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;

		error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
					      free->xbfi_blockcount);
		if (error)
			return error;

		xfs_bmap_del_free(flist, NULL, free);
	}

	return 0;
}

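/*
 * Allocate an extent from the realtime device for a bmap allocation request.
 * The request is aligned to the realtime extent size, the RT bitmap and
 * summary inodes are joined to the transaction, and the new blocks are
 * zeroed if the caller asked for zeroed userdata.
 */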
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;


	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time. These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp);
				return -EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

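/*
 * Preallocate blocks for the byte range [offset, offset + len) of the file,
 * looping one transaction at a time until the whole range has been mapped.
 */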
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &free_list);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (imap.br_startblock == HOLESTARTBLOCK ||
		    imap.br_state == XFS_EXT_UNWRITTEN) {
			/* skip the entire extent */
			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
						      imap.br_blockcount) - 1;
			continue;
		}

		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;

		/* DAX can just zero the backing device directly */
		if (IS_DAX(VFS_I(ip))) {
			error = dax_zero_page_range(VFS_I(ip), offset,
						    lastoffset - offset + 1,
						    xfs_get_blocks_direct);
			if (error)
				return error;
			continue;
		}

		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp,
				xfs_fsb_to_db(ip, imap.br_startblock),
				BTOBB(mp->m_sb.sb_blocksize),
				0, &bp, NULL);
		if (error)
			return error;

		memset(bp->b_addr +
				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		       0, lastoffset - offset + 1);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			return error;
	}
	return error;
}

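/*
 * Free (punch out) the blocks backing the byte range [offset, offset + len)
 * of the file, zeroing any partial blocks at either edge of the range.
 */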
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_off_t		iendoffset;
	xfs_extlen_t		mod=0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	ioffset = round_down(offset, rounding);
	iendoffset = round_up(offset + len, rounding) - 1;
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
					     iendoffset);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error)
			goto error0;

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;

}

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till last extent.
 * If we are shifting right, we will start with last extent inside file space
 * and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
		 */
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(mp, ip, false);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We would need to reserve permanent block for transaction.
		 * This will come into picture when after shifting extent into
		 * hole we found that adjacent extents can be merged which
		 * may lead to freeing of a block during record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;

		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &free_list,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
1536
1537/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001538 * xfs_collapse_file_space()
1539 * This routine frees disk space and shifts extents for the given file.
1540 * The first thing we do is free data blocks in the specified range
1541 * by calling xfs_free_file_space(), which also syncs dirty data
1542 * and invalidates the page cache over the region the collapse range
1543 * is working on. Then extent records are shifted left to cover the hole.
1544 * RETURNS:
1545 * 0 on success
1546 * errno on error
1547 *
1548 */
1549int
1550xfs_collapse_file_space(
1551 struct xfs_inode *ip,
1552 xfs_off_t offset,
1553 xfs_off_t len)
1554{
1555 int error;
1556
1557 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1558 trace_xfs_collapse_file_space(ip);
1559
1560 error = xfs_free_file_space(ip, offset, len);
1561 if (error)
1562 return error;
1563
1564 return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1565}
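
/*
 * Illustrative userspace sketch (not part of this file; the call chain and
 * example values are assumptions): collapse range is normally reached through
 * fallocate(2) with FALLOC_FL_COLLAPSE_RANGE, which XFS routes here via
 * xfs_file_fallocate(). The offset and length must be multiples of the
 * filesystem block size or the syscall fails with EINVAL. Assuming a
 * 4096-byte block size:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("datafile", O_RDWR);
 *	// remove bytes [4096, 12288) and shift the rest of the file down
 *	int ret = fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192);
 */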
1566
1567/*
1568 * xfs_insert_file_space()
1569 * This routine creates a hole by shifting extents for the given file.
1570 * The first thing we do is sync dirty data and invalidate the page cache
1571 * over the region the insert range is working on. Then we split the extent
1572 * at the given offset into two extents by calling xfs_bmap_split_extent,
1573 * and shift all extent records lying between [offset,
1574 * last allocated extent] to the right to make room for the hole.
1575 * RETURNS:
1576 * 0 on success
1577 * errno on error
1578 */
1579int
1580xfs_insert_file_space(
1581 struct xfs_inode *ip,
1582 loff_t offset,
1583 loff_t len)
1584{
1585 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1586 trace_xfs_insert_file_space(ip);
1587
1588 return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1589}
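
/*
 * Illustrative userspace sketch (not part of this file): insert range is the
 * mirror image of collapse and is normally reached through fallocate(2) with
 * FALLOC_FL_INSERT_RANGE, again with a block-aligned offset and length.
 * Continuing the example above:
 *
 *	// open an 8192-byte hole at offset 4096, shifting existing data right
 *	int ret = fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 8192);
 */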
1590
1591/*
Dave Chinnera133d952013-08-12 20:49:48 +10001592 * We need to check that the format of the data fork in the temporary inode is
1593 * valid for the target inode before doing the swap. This is not a problem with
1594 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1595 * data fork depending on the space the attribute fork is taking so we can get
1596 * invalid formats on the target inode.
1597 *
1598 * E.g. target has space for 7 extents in extent format, temp inode only has
1599 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1600 * btree, but when swapped it needs to be in extent format. Hence we can't just
1601 * blindly swap data forks on attr2 filesystems.
1602 *
1603 * Note that we check the swap in both directions so that we don't end up with
1604 * a corrupt temporary inode, either.
1605 *
1606 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1607 * inode will prevent this situation from occurring, so all we do here is
1608 * reject and log the attempt. Basically we are putting the responsibility on
1609 * userspace to get this right.
1610 */
1611static int
1612xfs_swap_extents_check_format(
1613 xfs_inode_t *ip, /* target inode */
1614 xfs_inode_t *tip) /* tmp inode */
1615{
1616
1617 /* Should never get a local format */
1618 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1619 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001620 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001621
1622 /*
1623 * If the target inode has fewer extents than the temporary inode, then
1624 * why did userspace call us?
1625 */
1626 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001627 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001628
1629 /*
1630 * If the target inode is in extent form and the temp inode is in btree
1631 * form, then we will end up with the target inode in the wrong format
1632 * as we already know there are fewer extents in the temp inode.
1633 */
1634 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1635 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001636 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001637
1638 /* Check temp in extent form to max in target */
1639 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1640 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1641 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001642 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001643
1644 /* Check target in extent form to max in temp */
1645 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1646 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1647 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001648 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001649
1650 /*
1651 * If we are in a btree format, check that the temp root block will fit
1652 * in the target and that it has enough extents to be in btree format
1653 * in the target.
1654 *
1655 * Note that we have to be careful to allow btree->extent conversions
1656 * (a common defrag case) which will occur when the temp inode is in
1657 * extent format...
1658 */
1659 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1660 if (XFS_IFORK_BOFF(ip) &&
1661 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
Dave Chinner24513372014-06-25 14:58:08 +10001662 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001663 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1664 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001665 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001666 }
1667
1668 /* Reciprocal target->temp btree format checks */
1669 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1670 if (XFS_IFORK_BOFF(tip) &&
1671 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
Dave Chinner24513372014-06-25 14:58:08 +10001672 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001673 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1674 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001675 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001676 }
1677
1678 return 0;
1679}
1680
Dave Chinner7abbb8f2014-09-23 16:20:11 +10001681static int
Dave Chinner4ef897a2014-08-04 13:44:08 +10001682xfs_swap_extent_flush(
1683 struct xfs_inode *ip)
1684{
1685 int error;
1686
1687 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1688 if (error)
1689 return error;
1690 truncate_pagecache_range(VFS_I(ip), 0, -1);
1691
1692 /* Verify O_DIRECT for ftmp */
1693 if (VFS_I(ip)->i_mapping->nrpages)
1694 return -EINVAL;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001695 return 0;
1696}
1697
1698int
Dave Chinnera133d952013-08-12 20:49:48 +10001699xfs_swap_extents(
1700 xfs_inode_t *ip, /* target inode */
1701 xfs_inode_t *tip, /* tmp inode */
1702 xfs_swapext_t *sxp)
1703{
1704 xfs_mount_t *mp = ip->i_mount;
1705 xfs_trans_t *tp;
1706 xfs_bstat_t *sbp = &sxp->sx_stat;
1707 xfs_ifork_t *tempifp, *ifp, *tifp;
1708 int src_log_flags, target_log_flags;
1709 int error = 0;
1710 int aforkblks = 0;
1711 int taforkblks = 0;
1712 __uint64_t tmp;
Dave Chinner81217682014-08-04 13:29:32 +10001713 int lock_flags;
Dave Chinnera133d952013-08-12 20:49:48 +10001714
Dave Chinnera133d952013-08-12 20:49:48 +10001715 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1716 if (!tempifp) {
Dave Chinner24513372014-06-25 14:58:08 +10001717 error = -ENOMEM;
Dave Chinnera133d952013-08-12 20:49:48 +10001718 goto out;
1719 }
1720
1721 /*
Dave Chinner723cac42015-02-23 21:47:29 +11001722 * Lock the inodes against other IO, page faults and truncate to
1723 * begin with. Then we can safely ensure the inodes are flushed and have
1724 * no page cache. Once we have done this we can take the ilocks and
1725 * do the rest of the checks.
Dave Chinnera133d952013-08-12 20:49:48 +10001726 */
Dave Chinner723cac42015-02-23 21:47:29 +11001727 lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
Dave Chinnera133d952013-08-12 20:49:48 +10001728 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
Dave Chinner723cac42015-02-23 21:47:29 +11001729 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
Dave Chinnera133d952013-08-12 20:49:48 +10001730
1731 /* Verify that both files have the same format */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001732 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
Dave Chinner24513372014-06-25 14:58:08 +10001733 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001734 goto out_unlock;
1735 }
1736
1737 /* Verify both files are either real-time or non-realtime */
1738 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
Dave Chinner24513372014-06-25 14:58:08 +10001739 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001740 goto out_unlock;
1741 }
1742
Dave Chinner4ef897a2014-08-04 13:44:08 +10001743 error = xfs_swap_extent_flush(ip);
Dave Chinnera133d952013-08-12 20:49:48 +10001744 if (error)
1745 goto out_unlock;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001746 error = xfs_swap_extent_flush(tip);
1747 if (error)
1748 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10001749
Dave Chinner4ef897a2014-08-04 13:44:08 +10001750 tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1751 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1752 if (error) {
Christoph Hellwig4906e212015-06-04 13:47:56 +10001753 xfs_trans_cancel(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10001754 goto out_unlock;
1755 }
Dave Chinner723cac42015-02-23 21:47:29 +11001756
1757 /*
1758 * Lock and join the inodes to the transaction so that transaction commit
1759 * or cancel will unlock the inodes from this point onwards.
1760 */
Dave Chinner4ef897a2014-08-04 13:44:08 +10001761 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1762 lock_flags |= XFS_ILOCK_EXCL;
Dave Chinner723cac42015-02-23 21:47:29 +11001763 xfs_trans_ijoin(tp, ip, lock_flags);
1764 xfs_trans_ijoin(tp, tip, lock_flags);
1765
Dave Chinnera133d952013-08-12 20:49:48 +10001766
1767 /* Verify all data are being swapped */
1768 if (sxp->sx_offset != 0 ||
1769 sxp->sx_length != ip->i_d.di_size ||
1770 sxp->sx_length != tip->i_d.di_size) {
Dave Chinner24513372014-06-25 14:58:08 +10001771 error = -EFAULT;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001772 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001773 }
1774
1775 trace_xfs_swap_extent_before(ip, 0);
1776 trace_xfs_swap_extent_before(tip, 1);
1777
1778 /* check inode formats now that data is flushed */
1779 error = xfs_swap_extents_check_format(ip, tip);
1780 if (error) {
1781 xfs_notice(mp,
1782 "%s: inode 0x%llx format is incompatible for exchanging.",
1783 __func__, ip->i_ino);
Dave Chinner4ef897a2014-08-04 13:44:08 +10001784 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001785 }
1786
1787 /*
1788 * Compare the current change & modify times with those
1789 * passed in. If they differ, we abort this swap.
1790 * This is the mechanism used to assure the calling
1791 * process that the file was not changed out from
1792 * under it.
1793 */
1794 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1795 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1796 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1797 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
Dave Chinner24513372014-06-25 14:58:08 +10001798 error = -EBUSY;
Dave Chinner81217682014-08-04 13:29:32 +10001799 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001800 }
Dave Chinnera133d952013-08-12 20:49:48 +10001801 /*
1802 * Count the number of extended attribute blocks
1803 */
1804	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1805 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1806 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1807 if (error)
1808 goto out_trans_cancel;
1809 }
1810	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1811 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1812 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1813 &taforkblks);
1814 if (error)
1815 goto out_trans_cancel;
1816 }
1817
Dave Chinner21b5c972013-08-30 10:23:44 +10001818 /*
1819	 * Before we've swapped the forks, let's set the owners of the forks
1820 * appropriately. We have to do this as we are demand paging the btree
1821 * buffers, and so the validation done on read will expect the owner
1822 * field to be correctly set. Once we change the owners, we can swap the
1823 * inode forks.
1824 *
1825 * Note the trickiness in setting the log flags - we set the owner log
1826 * flag on the opposite inode (i.e. the inode we are setting the new
1827 * owner to be) because once we swap the forks and log that, log
1828 * recovery is going to see the fork as owned by the swapped inode,
1829 * not the pre-swapped inodes.
1830 */
1831 src_log_flags = XFS_ILOG_CORE;
1832 target_log_flags = XFS_ILOG_CORE;
1833 if (ip->i_d.di_version == 3 &&
1834 ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Dave Chinner638f44162013-08-30 10:23:45 +10001835 target_log_flags |= XFS_ILOG_DOWNER;
1836 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1837 tip->i_ino, NULL);
Dave Chinner21b5c972013-08-30 10:23:44 +10001838 if (error)
1839 goto out_trans_cancel;
1840 }
1841
1842 if (tip->i_d.di_version == 3 &&
1843 tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Dave Chinner638f44162013-08-30 10:23:45 +10001844 src_log_flags |= XFS_ILOG_DOWNER;
1845 error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1846 ip->i_ino, NULL);
Dave Chinner21b5c972013-08-30 10:23:44 +10001847 if (error)
1848 goto out_trans_cancel;
1849 }
1850
Dave Chinnera133d952013-08-12 20:49:48 +10001851 /*
1852 * Swap the data forks of the inodes
1853 */
1854 ifp = &ip->i_df;
1855 tifp = &tip->i_df;
1856 *tempifp = *ifp; /* struct copy */
1857 *ifp = *tifp; /* struct copy */
1858 *tifp = *tempifp; /* struct copy */
1859
1860 /*
1861 * Fix the on-disk inode values
1862 */
1863 tmp = (__uint64_t)ip->i_d.di_nblocks;
1864 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1865 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
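	/*
	 * Worked example (illustrative numbers only): only the data forks are
	 * swapped, so attr fork blocks stay with their original inode. If ip
	 * held 100 data blocks plus aforkblks = 3 (di_nblocks = 103) and tip
	 * held 50 data blocks with taforkblks = 0, then afterwards
	 * ip->i_d.di_nblocks = 50 - 0 + 3 = 53 and
	 * tip->i_d.di_nblocks = 103 + 0 - 3 = 100.
	 */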
1866
1867 tmp = (__uint64_t) ip->i_d.di_nextents;
1868 ip->i_d.di_nextents = tip->i_d.di_nextents;
1869 tip->i_d.di_nextents = tmp;
1870
1871 tmp = (__uint64_t) ip->i_d.di_format;
1872 ip->i_d.di_format = tip->i_d.di_format;
1873 tip->i_d.di_format = tmp;
1874
1875 /*
1876 * The extents in the source inode could still contain speculative
1877 * preallocation beyond EOF (e.g. the file is open but not modified
1878 * while defrag is in progress). In that case, we need to copy over the
1879 * number of delalloc blocks the data fork in the source inode is
1880 * tracking beyond EOF so that when the fork is truncated away when the
1881 * temporary inode is unlinked we don't underrun the i_delayed_blks
1882 * counter on that inode.
1883 */
1884 ASSERT(tip->i_delayed_blks == 0);
1885 tip->i_delayed_blks = ip->i_delayed_blks;
1886 ip->i_delayed_blks = 0;
1887
Dave Chinnera133d952013-08-12 20:49:48 +10001888 switch (ip->i_d.di_format) {
1889 case XFS_DINODE_FMT_EXTENTS:
1890 /* If the extents fit in the inode, fix the
1891 * pointer. Otherwise it's already NULL or
1892 * pointing to the extent.
1893 */
1894 if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1895 ifp->if_u1.if_extents =
1896 ifp->if_u2.if_inline_ext;
1897 }
1898 src_log_flags |= XFS_ILOG_DEXT;
1899 break;
1900 case XFS_DINODE_FMT_BTREE:
Dave Chinner21b5c972013-08-30 10:23:44 +10001901 ASSERT(ip->i_d.di_version < 3 ||
Dave Chinner638f44162013-08-30 10:23:45 +10001902 (src_log_flags & XFS_ILOG_DOWNER));
Dave Chinnera133d952013-08-12 20:49:48 +10001903 src_log_flags |= XFS_ILOG_DBROOT;
1904 break;
1905 }
1906
Dave Chinnera133d952013-08-12 20:49:48 +10001907 switch (tip->i_d.di_format) {
1908 case XFS_DINODE_FMT_EXTENTS:
1909 /* If the extents fit in the inode, fix the
1910 * pointer. Otherwise it's already NULL or
1911 * pointing to the extent.
1912 */
1913 if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1914 tifp->if_u1.if_extents =
1915 tifp->if_u2.if_inline_ext;
1916 }
1917 target_log_flags |= XFS_ILOG_DEXT;
1918 break;
1919 case XFS_DINODE_FMT_BTREE:
1920 target_log_flags |= XFS_ILOG_DBROOT;
Dave Chinner21b5c972013-08-30 10:23:44 +10001921 ASSERT(tip->i_d.di_version < 3 ||
Dave Chinner638f44162013-08-30 10:23:45 +10001922 (target_log_flags & XFS_ILOG_DOWNER));
Dave Chinnera133d952013-08-12 20:49:48 +10001923 break;
1924 }
1925
Dave Chinnera133d952013-08-12 20:49:48 +10001926 xfs_trans_log_inode(tp, ip, src_log_flags);
1927 xfs_trans_log_inode(tp, tip, target_log_flags);
1928
1929 /*
1930 * If this is a synchronous mount, make sure that the
1931 * transaction goes to disk before returning to the user.
1932 */
1933 if (mp->m_flags & XFS_MOUNT_WSYNC)
1934 xfs_trans_set_sync(tp);
1935
Christoph Hellwig70393312015-06-04 13:48:08 +10001936 error = xfs_trans_commit(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10001937
1938 trace_xfs_swap_extent_after(ip, 0);
1939 trace_xfs_swap_extent_after(tip, 1);
1940out:
1941 kmem_free(tempifp);
1942 return error;
1943
1944out_unlock:
Dave Chinner81217682014-08-04 13:29:32 +10001945 xfs_iunlock(ip, lock_flags);
1946 xfs_iunlock(tip, lock_flags);
Dave Chinnera133d952013-08-12 20:49:48 +10001947 goto out;
1948
1949out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001950 xfs_trans_cancel(tp);
Dave Chinner723cac42015-02-23 21:47:29 +11001951 goto out;
Dave Chinnera133d952013-08-12 20:49:48 +10001952}
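
/*
 * Illustrative userspace sketch (not part of this file; the structure and
 * ioctl names are from xfs_fs.h, the surrounding usage is an assumption about
 * how xfs_fsr drives it): the extent swap is reached through the
 * XFS_IOC_SWAPEXT ioctl after a defragmented copy of the file has been built
 * in a temporary inode. The caller passes both file descriptors, a range
 * covering the whole file (per the sx_offset/sx_length check above) and a
 * bulkstat of the target taken before the copy; if the target's ctime/mtime
 * changed in the meantime, the ioctl fails with EBUSY. Roughly:
 *
 *	struct xfs_swapext sx = {
 *		.sx_version	= XFS_SX_VERSION,
 *		.sx_fdtarget	= target_fd,
 *		.sx_fdtmp	= tmp_fd,
 *		.sx_offset	= 0,
 *		.sx_length	= target_bstat.bs_size,
 *		.sx_stat	= target_bstat,	// from XFS_IOC_FSBULKSTAT_SINGLE
 *	};
 *	int ret = ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
 */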