/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode *ip,
	xfs_fsblock_t	start_fsb,
	xfs_off_t	count_fsb)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
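	/* blkdev_issue_zeroout() takes 512-byte sectors, hence the shifts. */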
	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
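
/*
 * Allocate an extent for a realtime file.  The request is aligned and
 * expressed in realtime extent units, and on success the inode block count
 * and quota reservations are updated to match.
 */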
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
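	/* do_div() converts ap->blkno from filesystem blocks to rtextents. */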
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
			&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0, i = 0;

	while (xfs_iext_get_extent(ifp, i++, &got)) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}
	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

/*
 * Format the disk block address for a getbmapx record, trimming a hole at
 * EOF back to the file size.  Returns 1 for success, 0 if we failed to map
 * the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	int			whichfork,
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	int64_t			end,		/* last block requested */
	xfs_fsblock_t		startblock,
	bool			moretocome)
{
	int64_t			fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, whichfork);
		if (!moretocome &&
		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == xfs_iext_count(ifp) - 1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}
452
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700453/* Adjust the reported bmap around shared/unshared extent transitions. */
454STATIC int
455xfs_getbmap_adjust_shared(
456 struct xfs_inode *ip,
457 int whichfork,
458 struct xfs_bmbt_irec *map,
459 struct getbmapx *out,
460 struct xfs_bmbt_irec *next_map)
461{
462 struct xfs_mount *mp = ip->i_mount;
463 xfs_agnumber_t agno;
464 xfs_agblock_t agbno;
465 xfs_agblock_t ebno;
466 xfs_extlen_t elen;
467 xfs_extlen_t nlen;
468 int error;
469
470 next_map->br_startblock = NULLFSBLOCK;
471 next_map->br_startoff = NULLFILEOFF;
472 next_map->br_blockcount = 0;
473
474 /* Only written data blocks can be shared. */
Christoph Hellwig9c4f29d2017-03-28 14:53:35 -0700475 if (!xfs_is_reflink_inode(ip) ||
476 whichfork != XFS_DATA_FORK ||
477 !xfs_bmap_is_real_extent(map))
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700478 return 0;
479
480 agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
481 agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
Darrick J. Wong92ff7282017-06-16 11:00:10 -0700482 error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
483 map->br_blockcount, &ebno, &elen, true);
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700484 if (error)
485 return error;
486
487 if (ebno == NULLAGBLOCK) {
488 /* No shared blocks at all. */
489 return 0;
490 } else if (agbno == ebno) {
491 /*
492 * Shared extent at (agbno, elen). Shrink the reported
493 * extent length and prepare to move the start of map[i]
494 * to agbno+elen, with the aim of (re)formatting the new
495 * map[i] the next time through the inner loop.
496 */
497 out->bmv_length = XFS_FSB_TO_BB(mp, elen);
498 out->bmv_oflags |= BMV_OF_SHARED;
499 if (elen != map->br_blockcount) {
500 *next_map = *map;
501 next_map->br_startblock += elen;
502 next_map->br_startoff += elen;
503 next_map->br_blockcount -= elen;
504 }
505 map->br_blockcount -= elen;
506 } else {
507 /*
508 * There's an unshared extent (agbno, ebno - agbno)
509 * followed by shared extent at (ebno, elen). Shrink
510 * the reported extent length to cover only the unshared
511 * extent and prepare to move up the start of map[i] to
512 * ebno, with the aim of (re)formatting the new map[i]
513 * the next time through the inner loop.
514 */
515 *next_map = *map;
516 nlen = ebno - agbno;
517 out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
518 next_map->br_startblock += nlen;
519 next_map->br_startoff += nlen;
520 next_map->br_blockcount -= nlen;
521 map->br_blockcount -= nlen;
522 }
523
524 return 0;
525}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	int64_t			bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	int64_t			fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;
	struct xfs_bmbt_irec	inject_map;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;

#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
		break;
	case XFS_COW_FORK:
		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
			return -EINVAL;

		if (xfs_get_cowextsz_hint(ip)) {
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	default:
		/* Local format data forks report no extents. */
		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
			bmv->bmv_entries = 0;
			return 0;
		}
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;


	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
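	/*
	 * Lock the appropriate fork for mapping.  The data fork may first
	 * need a flush so that most delalloc extents are converted before
	 * we walk the extent list.
	 */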
	switch (whichfork) {
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}
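	/*
	 * Walk the mapping in chunks of at most subnex extents per
	 * xfs_bmapi_read() call, formatting each mapping into the out
	 * array until the range, the map, or the user's slots run out.
	 */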
	do {
		nmap = (nex > subnex) ? subnex : nex;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && bmv->bmv_length &&
				cur_ext < bmv->bmv_count - 1; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			/* Is this a shared block? */
			error = xfs_getbmap_adjust_shared(ip, whichfork,
					&map[i], &out[cur_ext], &inject_map);
			if (error)
				goto out_free_map;

			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
					&out[cur_ext], prealloced, bmvend,
					map[i].br_startblock,
					inject_map.br_startblock != NULLFSBLOCK))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			/*
			 * In order to report shared extents accurately,
			 * we report each distinct shared/unshared part
			 * of a single bmbt record using multiple bmap
			 * extents.  To make that happen, we iterate the
			 * same map array item multiple times, each
			 * time trimming out the subextent that we just
			 * reported.
			 *
			 * Because of this, we must check the out array
			 * index (cur_ext) directly against bmv_count-1
			 * to avoid overflows.
			 */
			if (inject_map.br_startblock != NULLFSBLOCK) {
				map[i] = inject_map;
				i--;
			}
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		/* format results & advance arg */
		error = formatter(&arg, &out[i]);
		if (error)
			break;
	}

	kmem_free(out);
	return error;
}

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode.  Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical.  This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we
		 * don't need to cancel or finish them after the xfs_bunmapi()
		 * call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	xfs_defer_ijoin(&dfops, ip);
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
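
/*
 * For realtime files that must be unmapped in whole rtextent units, round
 * the start of the range up and the end of the range down to rtextent
 * boundaries so that no partially freed realtime extent is left behind.
 */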
static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}
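
/*
 * Wait for any pending direct I/O, then write back and invalidate the page
 * cache over the range about to be unmapped, rounded out to block/page
 * granularity.
 */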
static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT
	 * file and we can't use unwritten extents then we actually need to
	 * ensure to zero the whole extent, otherwise we just need to take
	 * care of the block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is
	 * smart enough to skip any holes, including those we just created,
	 * but we must take care not to zero beyond EOF and enlarge i_size.
	 */

	if (offset >= XFS_ISIZE(ip))
		return 0;

	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;

	return xfs_zero_range(ip, offset, len, NULL);
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}
1389
1390/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001391 * @next_fsb will keep track of the extent currently undergoing shift.
1392 * @stop_fsb will keep track of the extent at which we have to stop.
1393 * If we are shifting left, we will start with block (offset + len) and
1394 * shift each extent till last extent.
1395 * If we are shifting right, we will start with last extent inside file space
1396 * and continue until we reach the block corresponding to offset.
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001397 */
kbuild test robot72c1a732015-04-13 11:25:04 +10001398static int
Namjae Jeona904b1c2015-03-25 15:08:56 +11001399xfs_shift_file_space(
1400 struct xfs_inode *ip,
1401 xfs_off_t offset,
1402 xfs_off_t len,
1403 enum shift_direction direction)
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001404{
1405 int done = 0;
1406 struct xfs_mount *mp = ip->i_mount;
1407 struct xfs_trans *tp;
1408 int error;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001409 struct xfs_defer_ops dfops;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001410 xfs_fsblock_t first_block;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001411 xfs_fileoff_t stop_fsb;
Brian Foster2c845f52014-09-23 15:37:09 +10001412 xfs_fileoff_t next_fsb;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001413 xfs_fileoff_t shift_fsb;
Brian Foster48af96a2017-02-15 10:18:10 -08001414 uint resblks;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001415
Namjae Jeona904b1c2015-03-25 15:08:56 +11001416 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001417
Namjae Jeona904b1c2015-03-25 15:08:56 +11001418 if (direction == SHIFT_LEFT) {
Brian Foster48af96a2017-02-15 10:18:10 -08001419 /*
1420 * Reserve blocks to cover potential extent merges after left
1421 * shift operations.
1422 */
1423 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
Namjae Jeona904b1c2015-03-25 15:08:56 +11001424 next_fsb = XFS_B_TO_FSB(mp, offset + len);
1425 stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1426 } else {
1427 /*
1428 * If right shift, delegate the work of initialization of
1429 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
1430 */
Brian Foster48af96a2017-02-15 10:18:10 -08001431 resblks = 0;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001432 next_fsb = NULLFSBLOCK;
1433 stop_fsb = XFS_B_TO_FSB(mp, offset);
1434 }
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001435
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001436 shift_fsb = XFS_B_TO_FSB(mp, len);
1437
Brian Fosterf71721d2014-09-23 15:39:05 +10001438 /*
1439 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1440 * into the accessible region of the file.
1441 */
Brian Foster41b9d722014-09-02 12:12:53 +10001442 if (xfs_can_free_eofblocks(ip, true)) {
Brian Fostera36b9262017-01-27 23:22:55 -08001443 error = xfs_free_eofblocks(ip);
Brian Foster41b9d722014-09-02 12:12:53 +10001444 if (error)
1445 return error;
1446 }
Dave Chinner1669a8c2014-09-02 12:12:53 +10001447
Brian Fosterf71721d2014-09-23 15:39:05 +10001448 /*
1449 * Writeback and invalidate cache for the remainder of the file as we're
Namjae Jeona904b1c2015-03-25 15:08:56 +11001450 * about to shift down every extent from offset to EOF.
Brian Fosterf71721d2014-09-23 15:39:05 +10001451 */
1452 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
Namjae Jeona904b1c2015-03-25 15:08:56 +11001453 offset, -1);
Brian Fosterf71721d2014-09-23 15:39:05 +10001454 if (error)
1455 return error;
1456 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001457 offset >> PAGE_SHIFT, -1);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001458 if (error)
1459 return error;
1460
Namjae Jeona904b1c2015-03-25 15:08:56 +11001461 /*
Darrick J. Wong3af423b2017-09-18 09:41:17 -07001462 * Clean out anything hanging around in the cow fork now that
1463 * we've flushed all the dirty data out to disk to avoid having
1464 * CoW extents at the wrong offsets.
1465 */
1466 if (xfs_is_reflink_inode(ip)) {
1467 error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1468 true);
1469 if (error)
1470 return error;
1471 }
1472
1473 /*
1474 * The extent shifting code works on extent granularity. So, if
Namjae Jeona904b1c2015-03-25 15:08:56 +11001475 * stop_fsb is not the starting block of an extent, we need to split
1476 * the extent at stop_fsb.
1477 */
1478 if (direction == SHIFT_RIGHT) {
1479 error = xfs_bmap_split_extent(ip, stop_fsb);
1480 if (error)
1481 return error;
1482 }
1483
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001484 while (!error && !done) {
Brian Foster48af96a2017-02-15 10:18:10 -08001485 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1486 &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001487 if (error)
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001488 break;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001489
1490 xfs_ilock(ip, XFS_ILOCK_EXCL);
1491 error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
Brian Foster48af96a2017-02-15 10:18:10 -08001492 ip->i_gdquot, ip->i_pdquot, resblks, 0,
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001493 XFS_QMOPT_RES_REGBLKS);
1494 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001495 goto out_trans_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001496
Namjae Jeona904b1c2015-03-25 15:08:56 +11001497 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001498
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001499 xfs_defer_init(&dfops, &first_block);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001500
1501 /*
 1502 * We are using the write transaction, in which a maximum of 2 bmbt
 1503 * updates are allowed.
1504 */
Namjae Jeona904b1c2015-03-25 15:08:56 +11001505 error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001506 &done, stop_fsb, &first_block, &dfops,
Namjae Jeona904b1c2015-03-25 15:08:56 +11001507 direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001508 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001509 goto out_bmap_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001510
Christoph Hellwig8ad7c6292017-08-28 10:21:04 -07001511 error = xfs_defer_finish(&tp, &dfops);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001512 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001513 goto out_bmap_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001514
Christoph Hellwig70393312015-06-04 13:48:08 +10001515 error = xfs_trans_commit(tp);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001516 }
1517
1518 return error;
1519
Brian Fosterd4a97a02015-08-19 10:01:40 +10001520out_bmap_cancel:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001521 xfs_defer_cancel(&dfops);
Brian Fosterd4a97a02015-08-19 10:01:40 +10001522out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001523 xfs_trans_cancel(tp);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001524 return error;
1525}
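
/*
 * Illustrative example (hypothetical layout): with shift_fsb = 2, a data
 * extent starting at file block 10 moves to block 8 under SHIFT_LEFT
 * (closing a collapsed range) or to block 12 under SHIFT_RIGHT (opening
 * room for an insert). The loop above commits one transaction per batch of
 * up to XFS_BMAP_MAX_SHIFT_EXTENTS extents.
 */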
1526
1527/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001528 * xfs_collapse_file_space()
 1529 * This routine frees disk space and shifts extents for the given file.
 1530 * The first thing we do is free the data blocks in the specified range
 1531 * by calling xfs_free_file_space(), which also syncs dirty data and
 1532 * invalidates the page cache over the region being collapsed. We then
 1533 * shift extent records to the left to cover the hole.
1534 * RETURNS:
1535 * 0 on success
1536 * errno on error
1537 *
1538 */
1539int
1540xfs_collapse_file_space(
1541 struct xfs_inode *ip,
1542 xfs_off_t offset,
1543 xfs_off_t len)
1544{
1545 int error;
1546
1547 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1548 trace_xfs_collapse_file_space(ip);
1549
1550 error = xfs_free_file_space(ip, offset, len);
1551 if (error)
1552 return error;
1553
1554 return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1555}
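
/*
 * Illustrative userspace sketch (not part of this file): collapse range is
 * exposed through fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. Both offset
 * and len must be aligned to the filesystem block size or the call fails
 * with EINVAL. The helper name is hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <sys/types.h>
#include <fcntl.h>
#include <linux/falloc.h>

/* Remove len bytes at offset; everything beyond shifts left, file shrinks. */
static int collapse_range(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
}
#endif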
1556
1557/*
1558 * xfs_insert_file_space()
 1559 * This routine creates hole space by shifting extents for the given file.
 1560 * The first thing we do is sync dirty data and invalidate the page cache
 1561 * over the region on which the insert range is working. We then split an
 1562 * extent into two extents at the given offset by calling
 1563 * xfs_bmap_split_extent, and shift all extent records lying between
 1564 * [offset, last allocated extent] to the right to make room for the hole.
1565 * RETURNS:
1566 * 0 on success
1567 * errno on error
1568 */
1569int
1570xfs_insert_file_space(
1571 struct xfs_inode *ip,
1572 loff_t offset,
1573 loff_t len)
1574{
1575 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1576 trace_xfs_insert_file_space(ip);
1577
1578 return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1579}
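
/*
 * Illustrative userspace sketch (not part of this file): insert range is
 * exposed through fallocate(2) with FALLOC_FL_INSERT_RANGE. As with
 * collapse, offset and len must be block-size aligned, and offset must lie
 * inside the file. The helper name is hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <sys/types.h>
#include <fcntl.h>
#include <linux/falloc.h>

/* Open a len-byte hole at offset; everything beyond shifts right, file grows. */
static int insert_range(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
}
#endif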
1580
1581/*
Dave Chinnera133d952013-08-12 20:49:48 +10001582 * We need to check that the format of the data fork in the temporary inode is
1583 * valid for the target inode before doing the swap. This is not a problem with
1584 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1585 * data fork depending on the space the attribute fork is taking so we can get
1586 * invalid formats on the target inode.
1587 *
1588 * E.g. target has space for 7 extents in extent format, temp inode only has
1589 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1590 * btree, but when swapped it needs to be in extent format. Hence we can't just
1591 * blindly swap data forks on attr2 filesystems.
1592 *
1593 * Note that we check the swap in both directions so that we don't end up with
1594 * a corrupt temporary inode, either.
1595 *
1596 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1597 * inode will prevent this situation from occurring, so all we do here is
 1598 * reject and log the attempt. Basically, we are putting the responsibility on
1599 * userspace to get this right.
1600 */
1601static int
1602xfs_swap_extents_check_format(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001603 struct xfs_inode *ip, /* target inode */
1604 struct xfs_inode *tip) /* tmp inode */
Dave Chinnera133d952013-08-12 20:49:48 +10001605{
1606
1607 /* Should never get a local format */
1608 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1609 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001610 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001611
1612 /*
 1613 * If the target inode has fewer extents than the temporary inode, then
1614 * why did userspace call us?
1615 */
1616 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001617 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001618
1619 /*
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001620 * If we have to use the (expensive) rmap swap method, we can
1621 * handle any number of extents and any format.
1622 */
1623 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1624 return 0;
1625
1626 /*
Dave Chinnera133d952013-08-12 20:49:48 +10001627 * if the target inode is in extent form and the temp inode is in btree
1628 * form then we will end up with the target inode in the wrong format
 1629 * as we already know there are fewer extents in the temp inode.
1630 */
1631 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1632 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001633 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001634
1635 /* Check temp in extent form to max in target */
1636 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1637 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1638 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001639 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001640
1641 /* Check target in extent form to max in temp */
1642 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1643 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1644 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001645 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001646
1647 /*
1648 * If we are in a btree format, check that the temp root block will fit
1649 * in the target and that it has enough extents to be in btree format
1650 * in the target.
1651 *
1652 * Note that we have to be careful to allow btree->extent conversions
1653 * (a common defrag case) which will occur when the temp inode is in
1654 * extent format...
1655 */
1656 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001657 if (XFS_IFORK_Q(ip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001658 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
Dave Chinner24513372014-06-25 14:58:08 +10001659 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001660 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1661 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001662 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001663 }
1664
1665 /* Reciprocal target->temp btree format checks */
1666 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001667 if (XFS_IFORK_Q(tip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001668 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
Dave Chinner24513372014-06-25 14:58:08 +10001669 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001670 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1671 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001672 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001673 }
1674
1675 return 0;
1676}
1677
Dave Chinner7abbb8f2014-09-23 16:20:11 +10001678static int
Dave Chinner4ef897a2014-08-04 13:44:08 +10001679xfs_swap_extent_flush(
1680 struct xfs_inode *ip)
1681{
1682 int error;
1683
1684 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1685 if (error)
1686 return error;
1687 truncate_pagecache_range(VFS_I(ip), 0, -1);
1688
 1689 /* Verify the page cache is empty, as O_DIRECT access to ftmp requires */
1690 if (VFS_I(ip)->i_mapping->nrpages)
1691 return -EINVAL;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001692 return 0;
1693}
1694
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001695/*
1696 * Move extents from one file to another, when rmap is enabled.
1697 */
1698STATIC int
1699xfs_swap_extent_rmap(
1700 struct xfs_trans **tpp,
1701 struct xfs_inode *ip,
1702 struct xfs_inode *tip)
1703{
1704 struct xfs_bmbt_irec irec;
1705 struct xfs_bmbt_irec uirec;
1706 struct xfs_bmbt_irec tirec;
1707 xfs_fileoff_t offset_fsb;
1708 xfs_fileoff_t end_fsb;
1709 xfs_filblks_t count_fsb;
1710 xfs_fsblock_t firstfsb;
1711 struct xfs_defer_ops dfops;
1712 int error;
1713 xfs_filblks_t ilen;
1714 xfs_filblks_t rlen;
1715 int nimaps;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001716 uint64_t tip_flags2;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001717
1718 /*
1719 * If the source file has shared blocks, we must flag the donor
1720 * file as having shared blocks so that we get the shared-block
1721 * rmap functions when we go to fix up the rmaps. The flags
 1722 * will be switched for real later.
1723 */
1724 tip_flags2 = tip->i_d.di_flags2;
1725 if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1726 tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1727
1728 offset_fsb = 0;
1729 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1730 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1731
1732 while (count_fsb) {
1733 /* Read extent from the donor file */
1734 nimaps = 1;
1735 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1736 &nimaps, 0);
1737 if (error)
1738 goto out;
1739 ASSERT(nimaps == 1);
1740 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1741
1742 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1743 ilen = tirec.br_blockcount;
1744
1745 /* Unmap the old blocks in the source file. */
1746 while (tirec.br_blockcount) {
1747 xfs_defer_init(&dfops, &firstfsb);
1748 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1749
1750 /* Read extent from the source file */
1751 nimaps = 1;
1752 error = xfs_bmapi_read(ip, tirec.br_startoff,
1753 tirec.br_blockcount, &irec,
1754 &nimaps, 0);
1755 if (error)
1756 goto out_defer;
1757 ASSERT(nimaps == 1);
1758 ASSERT(tirec.br_startoff == irec.br_startoff);
1759 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1760
1761 /* Trim the extent. */
1762 uirec = tirec;
1763 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1764 tirec.br_blockcount,
1765 irec.br_blockcount);
1766 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1767
1768 /* Remove the mapping from the donor file. */
1769 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1770 tip, &uirec);
1771 if (error)
1772 goto out_defer;
1773
1774 /* Remove the mapping from the source file. */
1775 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1776 ip, &irec);
1777 if (error)
1778 goto out_defer;
1779
1780 /* Map the donor file's blocks into the source file. */
1781 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1782 ip, &uirec);
1783 if (error)
1784 goto out_defer;
1785
1786 /* Map the source file's blocks into the donor file. */
1787 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1788 tip, &irec);
1789 if (error)
1790 goto out_defer;
1791
Christoph Hellwig8ad7c6292017-08-28 10:21:04 -07001792 xfs_defer_ijoin(&dfops, ip);
1793 error = xfs_defer_finish(tpp, &dfops);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001794 if (error)
1795 goto out_defer;
1796
1797 tirec.br_startoff += rlen;
1798 if (tirec.br_startblock != HOLESTARTBLOCK &&
1799 tirec.br_startblock != DELAYSTARTBLOCK)
1800 tirec.br_startblock += rlen;
1801 tirec.br_blockcount -= rlen;
1802 }
1803
1804 /* Roll on... */
1805 count_fsb -= ilen;
1806 offset_fsb += ilen;
1807 }
1808
1809 tip->i_d.di_flags2 = tip_flags2;
1810 return 0;
1811
1812out_defer:
1813 xfs_defer_cancel(&dfops);
1814out:
1815 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1816 tip->i_d.di_flags2 = tip_flags2;
1817 return error;
1818}
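
/*
 * Illustrative trace (hypothetical extents): if the donor maps file blocks
 * [0..99] in one extent while the source maps them as [0..39] and [40..99],
 * the inner loop above trims the donor record to the 40-block overlap,
 * unmaps that piece from both files, cross-maps it, advances by rlen = 40,
 * and repeats for the remaining [40..99] piece.
 */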
1819
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001820/* Swap the extents of two files by swapping data forks. */
1821STATIC int
1822xfs_swap_extent_forks(
1823 struct xfs_trans *tp,
1824 struct xfs_inode *ip,
1825 struct xfs_inode *tip,
1826 int *src_log_flags,
1827 int *target_log_flags)
1828{
1829 struct xfs_ifork tempifp, *ifp, *tifp;
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001830 xfs_filblks_t aforkblks = 0;
1831 xfs_filblks_t taforkblks = 0;
1832 xfs_extnum_t junk;
Eric Sandeen4dfce572016-11-08 12:55:18 +11001833 xfs_extnum_t nextents;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001834 uint64_t tmp;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001835 int error;
1836
1837 /*
1838 * Count the number of extended attribute blocks
1839 */
 1840 if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
 1841 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001842 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001843 &aforkblks);
1844 if (error)
1845 return error;
1846 }
 1847 if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
 1848 tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001849 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001850 &taforkblks);
1851 if (error)
1852 return error;
1853 }
1854
1855 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07001856 * Btree format (v3) inodes have the inode number stamped in the bmbt
1857 * block headers. We can't start changing the bmbt blocks until the
1858 * inode owner change is logged so recovery does the right thing in the
1859 * event of a crash. Set the owner change log flags now and leave the
1860 * bmbt scan as the last step.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001861 */
1862 if (ip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001863 ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001864 (*target_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001865 if (tip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001866 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001867 (*src_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001868
1869 /*
1870 * Swap the data forks of the inodes
1871 */
1872 ifp = &ip->i_df;
1873 tifp = &tip->i_df;
1874 tempifp = *ifp; /* struct copy */
1875 *ifp = *tifp; /* struct copy */
1876 *tifp = tempifp; /* struct copy */
1877
1878 /*
1879 * Fix the on-disk inode values
1880 */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001881 tmp = (uint64_t)ip->i_d.di_nblocks;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001882 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1883 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1884
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001885 tmp = (uint64_t) ip->i_d.di_nextents;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001886 ip->i_d.di_nextents = tip->i_d.di_nextents;
1887 tip->i_d.di_nextents = tmp;
1888
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001889 tmp = (uint64_t) ip->i_d.di_format;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001890 ip->i_d.di_format = tip->i_d.di_format;
1891 tip->i_d.di_format = tmp;
1892
1893 /*
1894 * The extents in the source inode could still contain speculative
1895 * preallocation beyond EOF (e.g. the file is open but not modified
1896 * while defrag is in progress). In that case, we need to copy over the
1897 * number of delalloc blocks the data fork in the source inode is
1898 * tracking beyond EOF so that when the fork is truncated away when the
1899 * temporary inode is unlinked we don't underrun the i_delayed_blks
1900 * counter on that inode.
1901 */
1902 ASSERT(tip->i_delayed_blks == 0);
1903 tip->i_delayed_blks = ip->i_delayed_blks;
1904 ip->i_delayed_blks = 0;
1905
1906 switch (ip->i_d.di_format) {
1907 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001908 /*
1909 * If the extents fit in the inode, fix the pointer. Otherwise
1910 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001911 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001912 nextents = xfs_iext_count(&ip->i_df);
1913 if (nextents <= XFS_INLINE_EXTS)
1914 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001915 (*src_log_flags) |= XFS_ILOG_DEXT;
1916 break;
1917 case XFS_DINODE_FMT_BTREE:
1918 ASSERT(ip->i_d.di_version < 3 ||
1919 (*src_log_flags & XFS_ILOG_DOWNER));
1920 (*src_log_flags) |= XFS_ILOG_DBROOT;
1921 break;
1922 }
1923
1924 switch (tip->i_d.di_format) {
1925 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001926 /*
1927 * If the extents fit in the inode, fix the pointer. Otherwise
1928 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001929 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001930 nextents = xfs_iext_count(&tip->i_df);
1931 if (nextents <= XFS_INLINE_EXTS)
1932 tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001933 (*target_log_flags) |= XFS_ILOG_DEXT;
1934 break;
1935 case XFS_DINODE_FMT_BTREE:
1936 (*target_log_flags) |= XFS_ILOG_DBROOT;
1937 ASSERT(tip->i_d.di_version < 3 ||
1938 (*target_log_flags & XFS_ILOG_DOWNER));
1939 break;
1940 }
1941
1942 return 0;
1943}
1944
Brian Foster2dd3d702017-08-29 10:08:40 -07001945/*
1946 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1947 * change owner scan attempts to order all modified buffers in the current
1948 * transaction. In the event of ordered buffer failure, the offending buffer is
1949 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1950 * the transaction in this case to replenish the fallback log reservation and
1951 * restart the scan. This process repeats until the scan completes.
1952 */
1953static int
1954xfs_swap_change_owner(
1955 struct xfs_trans **tpp,
1956 struct xfs_inode *ip,
1957 struct xfs_inode *tmpip)
1958{
1959 int error;
1960 struct xfs_trans *tp = *tpp;
1961
1962 do {
1963 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1964 NULL);
1965 /* success or fatal error */
1966 if (error != -EAGAIN)
1967 break;
1968
1969 error = xfs_trans_roll(tpp);
1970 if (error)
1971 break;
1972 tp = *tpp;
1973
1974 /*
1975 * Redirty both inodes so they can relog and keep the log tail
1976 * moving forward.
1977 */
1978 xfs_trans_ijoin(tp, ip, 0);
1979 xfs_trans_ijoin(tp, tmpip, 0);
1980 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1981 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1982 } while (true);
1983
1984 return error;
1985}
1986
Dave Chinner4ef897a2014-08-04 13:44:08 +10001987int
Dave Chinnera133d952013-08-12 20:49:48 +10001988xfs_swap_extents(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001989 struct xfs_inode *ip, /* target inode */
1990 struct xfs_inode *tip, /* tmp inode */
1991 struct xfs_swapext *sxp)
Dave Chinnera133d952013-08-12 20:49:48 +10001992{
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001993 struct xfs_mount *mp = ip->i_mount;
1994 struct xfs_trans *tp;
1995 struct xfs_bstat *sbp = &sxp->sx_stat;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001996 int src_log_flags, target_log_flags;
1997 int error = 0;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001998 int lock_flags;
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001999 struct xfs_ifork *cowfp;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002000 uint64_t f;
Brian Foster2dd3d702017-08-29 10:08:40 -07002001 int resblks = 0;
Dave Chinnera133d952013-08-12 20:49:48 +10002002
Dave Chinnera133d952013-08-12 20:49:48 +10002003 /*
Dave Chinner723cac42015-02-23 21:47:29 +11002004 * Lock the inodes against other IO, page faults and truncate to
 2005 * begin with. Then we can safely ensure the inodes are flushed and have
 2006 * no page cache. Once we have done this we can take the ilocks and
2007 * do the rest of the checks.
Dave Chinnera133d952013-08-12 20:49:48 +10002008 */
Christoph Hellwig65523212016-11-30 14:33:25 +11002009 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2010 lock_flags = XFS_MMAPLOCK_EXCL;
Dave Chinner723cac42015-02-23 21:47:29 +11002011 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
Dave Chinnera133d952013-08-12 20:49:48 +10002012
2013 /* Verify that both files have the same format */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002014 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
Dave Chinner24513372014-06-25 14:58:08 +10002015 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10002016 goto out_unlock;
2017 }
2018
2019 /* Verify both files are either real-time or non-realtime */
2020 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
Dave Chinner24513372014-06-25 14:58:08 +10002021 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10002022 goto out_unlock;
2023 }
2024
Dave Chinner4ef897a2014-08-04 13:44:08 +10002025 error = xfs_swap_extent_flush(ip);
Dave Chinnera133d952013-08-12 20:49:48 +10002026 if (error)
2027 goto out_unlock;
Dave Chinner4ef897a2014-08-04 13:44:08 +10002028 error = xfs_swap_extent_flush(tip);
2029 if (error)
2030 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002031
Darrick J. Wong1f08af52016-10-03 09:11:53 -07002032 /*
2033 * Extent "swapping" with rmap requires a permanent reservation and
2034 * a block reservation because it's really just a remap operation
2035 * performed with log redo items!
2036 */
2037 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
2038 /*
2039 * Conceptually this shouldn't affect the shape of either
2040 * bmbt, but since we atomically move extents one by one,
2041 * we reserve enough space to rebuild both trees.
2042 */
2043 resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
2044 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
2045 XFS_DATA_FORK) +
2046 XFS_SWAP_RMAP_SPACE_RES(mp,
2047 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
2048 XFS_DATA_FORK);
Brian Foster2dd3d702017-08-29 10:08:40 -07002049 }
2050 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002051 if (error)
Dave Chinnera133d952013-08-12 20:49:48 +10002052 goto out_unlock;
Dave Chinner723cac42015-02-23 21:47:29 +11002053
2054 /*
 2055 * Lock and join the inodes to the transaction so that transaction commit
2056 * or cancel will unlock the inodes from this point onwards.
2057 */
Dave Chinner4ef897a2014-08-04 13:44:08 +10002058 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
2059 lock_flags |= XFS_ILOCK_EXCL;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002060 xfs_trans_ijoin(tp, ip, 0);
2061 xfs_trans_ijoin(tp, tip, 0);
Dave Chinner723cac42015-02-23 21:47:29 +11002062
2064 /* Verify all data are being swapped */
2065 if (sxp->sx_offset != 0 ||
2066 sxp->sx_length != ip->i_d.di_size ||
2067 sxp->sx_length != tip->i_d.di_size) {
Dave Chinner24513372014-06-25 14:58:08 +10002068 error = -EFAULT;
Dave Chinner4ef897a2014-08-04 13:44:08 +10002069 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002070 }
2071
2072 trace_xfs_swap_extent_before(ip, 0);
2073 trace_xfs_swap_extent_before(tip, 1);
2074
2075 /* check inode formats now that data is flushed */
2076 error = xfs_swap_extents_check_format(ip, tip);
2077 if (error) {
2078 xfs_notice(mp,
2079 "%s: inode 0x%llx format is incompatible for exchanging.",
2080 __func__, ip->i_ino);
Dave Chinner4ef897a2014-08-04 13:44:08 +10002081 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002082 }
2083
2084 /*
 2085 * Compare the current change and modify times with those
 2086 * passed in. If they differ, we abort this swap.
 2087 * This is the mechanism used to assure the calling
2088 * process that the file was not changed out from
2089 * under it.
2090 */
2091 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2092 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2093 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2094 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
Dave Chinner24513372014-06-25 14:58:08 +10002095 error = -EBUSY;
Dave Chinner81217682014-08-04 13:29:32 +10002096 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002097 }
Dave Chinnera133d952013-08-12 20:49:48 +10002098
Dave Chinner21b5c972013-08-30 10:23:44 +10002099 /*
Dave Chinner21b5c972013-08-30 10:23:44 +10002100 * Note the trickiness in setting the log flags - we set the owner log
2101 * flag on the opposite inode (i.e. the inode we are setting the new
2102 * owner to be) because once we swap the forks and log that, log
2103 * recovery is going to see the fork as owned by the swapped inode,
2104 * not the pre-swapped inodes.
2105 */
2106 src_log_flags = XFS_ILOG_CORE;
2107 target_log_flags = XFS_ILOG_CORE;
Dave Chinner21b5c972013-08-30 10:23:44 +10002108
Darrick J. Wong1f08af52016-10-03 09:11:53 -07002109 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2110 error = xfs_swap_extent_rmap(&tp, ip, tip);
2111 else
2112 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2113 &target_log_flags);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002114 if (error)
2115 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002116
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002117 /* Do we have to swap reflink flags? */
2118 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2119 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
2120 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2121 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2122 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2123 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2124 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
Darrick J. Wong52bfcdd2017-09-18 09:41:18 -07002125 }
2126
2127 /* Swap the cow forks. */
2128 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2129 xfs_extnum_t extnum;
2130
2131 ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2132 ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2133
2134 extnum = ip->i_cnextents;
2135 ip->i_cnextents = tip->i_cnextents;
2136 tip->i_cnextents = extnum;
2137
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002138 cowfp = ip->i_cowfp;
2139 ip->i_cowfp = tip->i_cowfp;
2140 tip->i_cowfp = cowfp;
Darrick J. Wong52bfcdd2017-09-18 09:41:18 -07002141
2142 if (ip->i_cowfp && ip->i_cnextents)
2143 xfs_inode_set_cowblocks_tag(ip);
2144 else
2145 xfs_inode_clear_cowblocks_tag(ip);
2146 if (tip->i_cowfp && tip->i_cnextents)
2147 xfs_inode_set_cowblocks_tag(tip);
2148 else
2149 xfs_inode_clear_cowblocks_tag(tip);
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002150 }
2151
Dave Chinnera133d952013-08-12 20:49:48 +10002152 xfs_trans_log_inode(tp, ip, src_log_flags);
2153 xfs_trans_log_inode(tp, tip, target_log_flags);
2154
2155 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07002156 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2157 * have inode number owner values in the bmbt blocks that still refer to
2158 * the old inode. Scan each bmbt to fix up the owner values with the
2159 * inode number of the current inode.
2160 */
2161 if (src_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002162 error = xfs_swap_change_owner(&tp, ip, tip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002163 if (error)
2164 goto out_trans_cancel;
2165 }
2166 if (target_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002167 error = xfs_swap_change_owner(&tp, tip, ip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002168 if (error)
2169 goto out_trans_cancel;
2170 }
2171
2172 /*
Dave Chinnera133d952013-08-12 20:49:48 +10002173 * If this is a synchronous mount, make sure that the
2174 * transaction goes to disk before returning to the user.
2175 */
2176 if (mp->m_flags & XFS_MOUNT_WSYNC)
2177 xfs_trans_set_sync(tp);
2178
Christoph Hellwig70393312015-06-04 13:48:08 +10002179 error = xfs_trans_commit(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10002180
2181 trace_xfs_swap_extent_after(ip, 0);
2182 trace_xfs_swap_extent_after(tip, 1);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002183
Christoph Hellwig65523212016-11-30 14:33:25 +11002184out_unlock:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002185 xfs_iunlock(ip, lock_flags);
2186 xfs_iunlock(tip, lock_flags);
Christoph Hellwig65523212016-11-30 14:33:25 +11002187 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
Dave Chinnera133d952013-08-12 20:49:48 +10002188 return error;
2189
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002190out_trans_cancel:
2191 xfs_trans_cancel(tp);
Christoph Hellwig65523212016-11-30 14:33:25 +11002192 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002193}
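
/*
 * Illustrative userspace sketch (not part of this file): xfs_swap_extents()
 * is reached via the XFS_IOC_SWAPEXT ioctl, as issued by the xfs_fsr online
 * defragmenter. The kernel insists that sx_offset is 0, that sx_length
 * matches both file sizes, and that sx_stat carries the target's pre-swap
 * ctime/mtime (from bulkstat) so a concurrent modification fails with
 * EBUSY. Field usage below follows the xfsprogs headers; error handling and
 * the bulkstat call that fills pre_stat are elided.
 */
#if 0
#include <xfs/xfs.h>		/* XFS_IOC_SWAPEXT, struct xfs_swapext */
#include <sys/ioctl.h>
#include <sys/types.h>

static int swap_forks(int fd_target, int fd_tmp,
		      const struct xfs_bstat *pre_stat, off_t size)
{
	struct xfs_swapext sx = {
		.sx_version	= XFS_SX_VERSION,
		.sx_fdtarget	= fd_target,
		.sx_fdtmp	= fd_tmp,
		.sx_offset	= 0,		/* whole file only */
		.sx_length	= size,
		.sx_stat	= *pre_stat,	/* from XFS_IOC_FSBULKSTAT_SINGLE */
	};

	return ioctl(fd_target, XFS_IOC_SWAPEXT, &sx);
}
#endif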