/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

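	/*
	 * blkdev_issue_zeroout() takes its offset and length in units of
	 * 512-byte sectors, hence the conversion from filesystem blocks below.
	 */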
	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in that
 * case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0, i = 0;

	while (xfs_iext_get_extent(ifp, i++, &got)) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}
	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

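/*
 * Fill the next slot of the getbmap output array from a single mapping and
 * move the request window forward past the part just reported.
 */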
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false, trimmed = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

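/*
 * Emit a getbmap entry describing a hole, unless the caller suppressed holes
 * with BMV_IF_NO_HOLES.
 */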
static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

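/*
 * A getbmap request is complete once the requested length is exhausted or all
 * but one of the caller-supplied entry slots have been filled.
 */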
static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

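/*
 * Advance @rec past the portion just reported (which may have been trimmed at
 * a shared/unshared boundary) so the remainder of the underlying bmbt record
 * can be reported next.  Returns false once the whole record is consumed.
 */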
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get the inode's extents as described in bmv, and format them for output in
 * the caller-supplied array.  Entries are filled in until all extents have
 * been mapped or the passed-in bmv->bmv_count slots have been used up.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	xfs_extnum_t		idx;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork?  Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_get_extent(ifp, ++idx, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical.  This will always punch
 * out both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	xfs_defer_ijoin(&dfops, ip);
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

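/*
 * Trim the range to be unmapped inwards to realtime extent size boundaries so
 * that xfs_bunmapi() is not asked to free partial realtime extents at either
 * end of the range.
 */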
static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

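/*
 * Wait for pending direct I/O, then write back and invalidate the page cache
 * over the block-aligned range that is about to be unmapped.
 */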
static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT
	 * file and we can't use unwritten extents then we actually need to
	 * ensure to zero the whole extent, otherwise we just need to take
	 * care of block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */

	if (offset >= XFS_ISIZE(ip))
		return 0;

	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;

	return xfs_zero_range(ip, offset, len, NULL);
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;

}

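/*
 * Prepare a file for an extent shift: trim post-EOF preallocations, write back
 * and invalidate the page cache from the shift offset onwards, and cancel any
 * COW fork extents that would otherwise end up at the wrong offsets.
 */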
static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(); this also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working.  The extent records are then shifted to the left
 *	to cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(&dfops, &first_block);
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &dfops);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_bmap_cancel;
		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working.  We then split
 *	the extent covering the given offset into two extents by calling
 *	xfs_bmap_split_extent, and shift all extent records lying between
 *	[offset, last allocated extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of an extent, we need to split the extent
	 * at stop_fsb.
	 */
1426 error = xfs_bmap_split_extent(ip, stop_fsb);
1427 if (error)
1428 return error;
1429
1430 while (!error && !done) {
1431 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1432 &tp);
1433 if (error)
1434 break;
1435
1436 xfs_ilock(ip, XFS_ILOCK_EXCL);
1437 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1438 xfs_defer_init(&dfops, &first_block);
Christoph Hellwigecfea3f2017-10-19 11:07:11 -07001439 error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1440 &done, stop_fsb, &first_block, &dfops);
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001441 if (error)
1442 goto out_bmap_cancel;
1443
1444 error = xfs_defer_finish(&tp, &dfops);
1445 if (error)
1446 goto out_bmap_cancel;
1447 error = xfs_trans_commit(tp);
1448 }
1449
1450 return error;
1451
1452out_bmap_cancel:
1453 xfs_defer_cancel(&dfops);
1454 xfs_trans_cancel(tp);
1455 return error;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001456}
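/*
 * Illustrative usage sketch (assumptions noted inline): insert range is the
 * userspace-visible counterpart of the routine above and is reached through
 * fallocate(2) rather than called directly:
 *
 *	// shift data from 'offset' onwards right by 'len' bytes, leaving a
 *	// block-aligned hole; offset and len must be block aligned
 *	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len) < 0)
 *		perror("fallocate");
 *
 * The dispatch from the VFS fallocate path into this function lives in
 * xfs_file.c; only the extent manipulation is done here.
 */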
1457
1458/*
Dave Chinnera133d952013-08-12 20:49:48 +10001459 * We need to check that the format of the data fork in the temporary inode is
1460 * valid for the target inode before doing the swap. This is not a problem with
1461 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1462 * data fork depending on the space the attribute fork is taking so we can get
1463 * invalid formats on the target inode.
1464 *
1465 * E.g. target has space for 7 extents in extent format, temp inode only has
1466 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1467 * btree, but when swapped it needs to be in extent format. Hence we can't just
1468 * blindly swap data forks on attr2 filesystems.
1469 *
1470 * Note that we check the swap in both directions so that we don't end up with
1471 * a corrupt temporary inode, either.
1472 *
1473 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1474 * inode will prevent this situation from occurring, so all we do here is
1475 * reject and log the attempt. Basically we are putting the responsibility on
1476 * userspace to get this right.
1477 */
1478static int
1479xfs_swap_extents_check_format(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001480 struct xfs_inode *ip, /* target inode */
1481 struct xfs_inode *tip) /* tmp inode */
Dave Chinnera133d952013-08-12 20:49:48 +10001482{
1483
1484 /* Should never get a local format */
1485 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1486 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001487 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001488
1489 /*
1490	 * if the target inode has fewer extents than the temporary inode then
1491 * why did userspace call us?
1492 */
1493 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001494 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001495
1496 /*
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001497 * If we have to use the (expensive) rmap swap method, we can
1498 * handle any number of extents and any format.
1499 */
1500 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1501 return 0;
1502
1503 /*
Dave Chinnera133d952013-08-12 20:49:48 +10001504 * if the target inode is in extent form and the temp inode is in btree
1505 * form then we will end up with the target inode in the wrong format
1506	 * as we already know there are fewer extents in the temp inode.
1507 */
1508 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1509 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001510 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001511
1512 /* Check temp in extent form to max in target */
1513 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1514 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1515 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001516 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001517
1518 /* Check target in extent form to max in temp */
1519 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1520 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1521 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001522 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001523
1524 /*
1525 * If we are in a btree format, check that the temp root block will fit
1526 * in the target and that it has enough extents to be in btree format
1527 * in the target.
1528 *
1529 * Note that we have to be careful to allow btree->extent conversions
1530 * (a common defrag case) which will occur when the temp inode is in
1531 * extent format...
1532 */
1533 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001534 if (XFS_IFORK_Q(ip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001535 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
Dave Chinner24513372014-06-25 14:58:08 +10001536 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001537 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1538 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001539 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001540 }
1541
1542 /* Reciprocal target->temp btree format checks */
1543 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001544 if (XFS_IFORK_Q(tip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001545 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
Dave Chinner24513372014-06-25 14:58:08 +10001546 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001547 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1548 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001549 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001550 }
1551
1552 return 0;
1553}
1554
Dave Chinner7abbb8f2014-09-23 16:20:11 +10001555static int
Dave Chinner4ef897a2014-08-04 13:44:08 +10001556xfs_swap_extent_flush(
1557 struct xfs_inode *ip)
1558{
1559 int error;
1560
1561 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1562 if (error)
1563 return error;
1564 truncate_pagecache_range(VFS_I(ip), 0, -1);
1565
1566 /* Verify O_DIRECT for ftmp */
1567 if (VFS_I(ip)->i_mapping->nrpages)
1568 return -EINVAL;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001569 return 0;
1570}
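/*
 * Descriptive note on the helper above (assumption about the caller): the
 * defragmenter is expected to have written the temporary file with O_DIRECT,
 * so once the flush and invalidation complete no page cache pages should
 * remain on either inode; leftover pages indicate racing buffered I/O and
 * the swap is refused with -EINVAL.
 */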
1571
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001572/*
1573 * Move extents from one file to another, when rmap is enabled.
1574 */
1575STATIC int
1576xfs_swap_extent_rmap(
1577 struct xfs_trans **tpp,
1578 struct xfs_inode *ip,
1579 struct xfs_inode *tip)
1580{
1581 struct xfs_bmbt_irec irec;
1582 struct xfs_bmbt_irec uirec;
1583 struct xfs_bmbt_irec tirec;
1584 xfs_fileoff_t offset_fsb;
1585 xfs_fileoff_t end_fsb;
1586 xfs_filblks_t count_fsb;
1587 xfs_fsblock_t firstfsb;
1588 struct xfs_defer_ops dfops;
1589 int error;
1590 xfs_filblks_t ilen;
1591 xfs_filblks_t rlen;
1592 int nimaps;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001593 uint64_t tip_flags2;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001594
1595 /*
1596 * If the source file has shared blocks, we must flag the donor
1597 * file as having shared blocks so that we get the shared-block
1598 * rmap functions when we go to fix up the rmaps. The flags
1599 * will be switch for reals later.
1600 */
1601 tip_flags2 = tip->i_d.di_flags2;
1602 if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1603 tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1604
1605 offset_fsb = 0;
1606 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1607 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1608
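	/*
	 * Walk the donor file one mapping at a time: trim each piece to the
	 * shorter of the donor and source mappings, unmap both pieces, then
	 * cross-map them (donor blocks into the source fork and vice versa).
	 * The deferred rmap/bmap updates are finished and the transaction is
	 * rolled after every piece so no single transaction outgrows its
	 * reservation.
	 */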
1609 while (count_fsb) {
1610 /* Read extent from the donor file */
1611 nimaps = 1;
1612 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1613 &nimaps, 0);
1614 if (error)
1615 goto out;
1616 ASSERT(nimaps == 1);
1617 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1618
1619 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1620 ilen = tirec.br_blockcount;
1621
1622 /* Unmap the old blocks in the source file. */
1623 while (tirec.br_blockcount) {
1624 xfs_defer_init(&dfops, &firstfsb);
1625 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1626
1627 /* Read extent from the source file */
1628 nimaps = 1;
1629 error = xfs_bmapi_read(ip, tirec.br_startoff,
1630 tirec.br_blockcount, &irec,
1631 &nimaps, 0);
1632 if (error)
1633 goto out_defer;
1634 ASSERT(nimaps == 1);
1635 ASSERT(tirec.br_startoff == irec.br_startoff);
1636 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1637
1638 /* Trim the extent. */
1639 uirec = tirec;
1640 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1641 tirec.br_blockcount,
1642 irec.br_blockcount);
1643 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1644
1645 /* Remove the mapping from the donor file. */
1646 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1647 tip, &uirec);
1648 if (error)
1649 goto out_defer;
1650
1651 /* Remove the mapping from the source file. */
1652 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1653 ip, &irec);
1654 if (error)
1655 goto out_defer;
1656
1657 /* Map the donor file's blocks into the source file. */
1658 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1659 ip, &uirec);
1660 if (error)
1661 goto out_defer;
1662
1663 /* Map the source file's blocks into the donor file. */
1664 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1665 tip, &irec);
1666 if (error)
1667 goto out_defer;
1668
Christoph Hellwig8ad7c6292017-08-28 10:21:04 -07001669 xfs_defer_ijoin(&dfops, ip);
1670 error = xfs_defer_finish(tpp, &dfops);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001671 if (error)
1672 goto out_defer;
1673
1674 tirec.br_startoff += rlen;
1675 if (tirec.br_startblock != HOLESTARTBLOCK &&
1676 tirec.br_startblock != DELAYSTARTBLOCK)
1677 tirec.br_startblock += rlen;
1678 tirec.br_blockcount -= rlen;
1679 }
1680
1681 /* Roll on... */
1682 count_fsb -= ilen;
1683 offset_fsb += ilen;
1684 }
1685
1686 tip->i_d.di_flags2 = tip_flags2;
1687 return 0;
1688
1689out_defer:
1690 xfs_defer_cancel(&dfops);
1691out:
1692 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1693 tip->i_d.di_flags2 = tip_flags2;
1694 return error;
1695}
1696
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001697/* Swap the extents of two files by swapping data forks. */
1698STATIC int
1699xfs_swap_extent_forks(
1700 struct xfs_trans *tp,
1701 struct xfs_inode *ip,
1702 struct xfs_inode *tip,
1703 int *src_log_flags,
1704 int *target_log_flags)
1705{
1706 struct xfs_ifork tempifp, *ifp, *tifp;
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001707 xfs_filblks_t aforkblks = 0;
1708 xfs_filblks_t taforkblks = 0;
1709 xfs_extnum_t junk;
Eric Sandeen4dfce572016-11-08 12:55:18 +11001710 xfs_extnum_t nextents;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001711 uint64_t tmp;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001712 int error;
1713
1714 /*
1715 * Count the number of extended attribute blocks
1716 */
1717 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1718 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001719 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001720 &aforkblks);
1721 if (error)
1722 return error;
1723 }
1724 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1725 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001726 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001727 &taforkblks);
1728 if (error)
1729 return error;
1730 }
1731
1732 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07001733 * Btree format (v3) inodes have the inode number stamped in the bmbt
1734 * block headers. We can't start changing the bmbt blocks until the
1735 * inode owner change is logged so recovery does the right thing in the
1736 * event of a crash. Set the owner change log flags now and leave the
1737 * bmbt scan as the last step.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001738 */
1739 if (ip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001740 ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001741 (*target_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001742 if (tip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001743 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001744 (*src_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001745
1746 /*
1747 * Swap the data forks of the inodes
1748 */
1749 ifp = &ip->i_df;
1750 tifp = &tip->i_df;
1751 tempifp = *ifp; /* struct copy */
1752 *ifp = *tifp; /* struct copy */
1753 *tifp = tempifp; /* struct copy */
1754
1755 /*
1756 * Fix the on-disk inode values
1757 */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001758 tmp = (uint64_t)ip->i_d.di_nblocks;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001759 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1760 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1761
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001762 tmp = (uint64_t) ip->i_d.di_nextents;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001763 ip->i_d.di_nextents = tip->i_d.di_nextents;
1764 tip->i_d.di_nextents = tmp;
1765
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001766 tmp = (uint64_t) ip->i_d.di_format;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001767 ip->i_d.di_format = tip->i_d.di_format;
1768 tip->i_d.di_format = tmp;
1769
1770 /*
1771 * The extents in the source inode could still contain speculative
1772 * preallocation beyond EOF (e.g. the file is open but not modified
1773 * while defrag is in progress). In that case, we need to copy over the
1774 * number of delalloc blocks the data fork in the source inode is
1775 * tracking beyond EOF so that when the fork is truncated away when the
1776 * temporary inode is unlinked we don't underrun the i_delayed_blks
1777 * counter on that inode.
1778 */
1779 ASSERT(tip->i_delayed_blks == 0);
1780 tip->i_delayed_blks = ip->i_delayed_blks;
1781 ip->i_delayed_blks = 0;
1782
1783 switch (ip->i_d.di_format) {
1784 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001785 /*
1786 * If the extents fit in the inode, fix the pointer. Otherwise
1787 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001788 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001789 nextents = xfs_iext_count(&ip->i_df);
1790 if (nextents <= XFS_INLINE_EXTS)
1791 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001792 (*src_log_flags) |= XFS_ILOG_DEXT;
1793 break;
1794 case XFS_DINODE_FMT_BTREE:
1795 ASSERT(ip->i_d.di_version < 3 ||
1796 (*src_log_flags & XFS_ILOG_DOWNER));
1797 (*src_log_flags) |= XFS_ILOG_DBROOT;
1798 break;
1799 }
1800
1801 switch (tip->i_d.di_format) {
1802 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001803 /*
1804 * If the extents fit in the inode, fix the pointer. Otherwise
1805 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001806 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001807 nextents = xfs_iext_count(&tip->i_df);
1808 if (nextents <= XFS_INLINE_EXTS)
1809 tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001810 (*target_log_flags) |= XFS_ILOG_DEXT;
1811 break;
1812 case XFS_DINODE_FMT_BTREE:
1813 (*target_log_flags) |= XFS_ILOG_DBROOT;
1814 ASSERT(tip->i_d.di_version < 3 ||
1815 (*target_log_flags & XFS_ILOG_DOWNER));
1816 break;
1817 }
1818
1819 return 0;
1820}
1821
Brian Foster2dd3d702017-08-29 10:08:40 -07001822/*
1823 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1824 * change owner scan attempts to order all modified buffers in the current
1825 * transaction. In the event of ordered buffer failure, the offending buffer is
1826 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1827 * the transaction in this case to replenish the fallback log reservation and
1828 * restart the scan. This process repeats until the scan completes.
1829 */
1830static int
1831xfs_swap_change_owner(
1832 struct xfs_trans **tpp,
1833 struct xfs_inode *ip,
1834 struct xfs_inode *tmpip)
1835{
1836 int error;
1837 struct xfs_trans *tp = *tpp;
1838
1839 do {
1840 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1841 NULL);
1842 /* success or fatal error */
1843 if (error != -EAGAIN)
1844 break;
1845
1846 error = xfs_trans_roll(tpp);
1847 if (error)
1848 break;
1849 tp = *tpp;
1850
1851 /*
1852 * Redirty both inodes so they can relog and keep the log tail
1853 * moving forward.
1854 */
1855 xfs_trans_ijoin(tp, ip, 0);
1856 xfs_trans_ijoin(tp, tmpip, 0);
1857 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1858 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1859 } while (true);
1860
1861 return error;
1862}
1863
Dave Chinner4ef897a2014-08-04 13:44:08 +10001864int
Dave Chinnera133d952013-08-12 20:49:48 +10001865xfs_swap_extents(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001866 struct xfs_inode *ip, /* target inode */
1867 struct xfs_inode *tip, /* tmp inode */
1868 struct xfs_swapext *sxp)
Dave Chinnera133d952013-08-12 20:49:48 +10001869{
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001870 struct xfs_mount *mp = ip->i_mount;
1871 struct xfs_trans *tp;
1872 struct xfs_bstat *sbp = &sxp->sx_stat;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001873 int src_log_flags, target_log_flags;
1874 int error = 0;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001875 int lock_flags;
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001876 struct xfs_ifork *cowfp;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001877 uint64_t f;
Brian Foster2dd3d702017-08-29 10:08:40 -07001878 int resblks = 0;
Dave Chinnera133d952013-08-12 20:49:48 +10001879
Dave Chinnera133d952013-08-12 20:49:48 +10001880 /*
Dave Chinner723cac42015-02-23 21:47:29 +11001881 * Lock the inodes against other IO, page faults and truncate to
1882	 * begin with. Then we can safely ensure the inodes are flushed and have
1883	 * no page cache. Once we have done this we can take the ilocks and
1884 * do the rest of the checks.
Dave Chinnera133d952013-08-12 20:49:48 +10001885 */
Christoph Hellwig65523212016-11-30 14:33:25 +11001886 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1887 lock_flags = XFS_MMAPLOCK_EXCL;
Dave Chinner723cac42015-02-23 21:47:29 +11001888 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
Dave Chinnera133d952013-08-12 20:49:48 +10001889
1890 /* Verify that both files have the same format */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001891 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
Dave Chinner24513372014-06-25 14:58:08 +10001892 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001893 goto out_unlock;
1894 }
1895
1896 /* Verify both files are either real-time or non-realtime */
1897 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
Dave Chinner24513372014-06-25 14:58:08 +10001898 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001899 goto out_unlock;
1900 }
1901
Dave Chinner4ef897a2014-08-04 13:44:08 +10001902 error = xfs_swap_extent_flush(ip);
Dave Chinnera133d952013-08-12 20:49:48 +10001903 if (error)
1904 goto out_unlock;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001905 error = xfs_swap_extent_flush(tip);
1906 if (error)
1907 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10001908
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001909 /*
1910 * Extent "swapping" with rmap requires a permanent reservation and
1911 * a block reservation because it's really just a remap operation
1912 * performed with log redo items!
1913 */
1914 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1915 /*
1916 * Conceptually this shouldn't affect the shape of either
1917 * bmbt, but since we atomically move extents one by one,
1918 * we reserve enough space to rebuild both trees.
1919 */
1920 resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
1921 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
1922 XFS_DATA_FORK) +
1923 XFS_SWAP_RMAP_SPACE_RES(mp,
1924 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
1925 XFS_DATA_FORK);
Brian Foster2dd3d702017-08-29 10:08:40 -07001926 }
1927 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001928 if (error)
Dave Chinnera133d952013-08-12 20:49:48 +10001929 goto out_unlock;
Dave Chinner723cac42015-02-23 21:47:29 +11001930
1931 /*
1932	 * Lock and join the inodes to the transaction so that transaction commit
1933 * or cancel will unlock the inodes from this point onwards.
1934 */
Dave Chinner4ef897a2014-08-04 13:44:08 +10001935 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1936 lock_flags |= XFS_ILOCK_EXCL;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001937 xfs_trans_ijoin(tp, ip, 0);
1938 xfs_trans_ijoin(tp, tip, 0);
Dave Chinner723cac42015-02-23 21:47:29 +11001939
Dave Chinnera133d952013-08-12 20:49:48 +10001940
1941 /* Verify all data are being swapped */
1942 if (sxp->sx_offset != 0 ||
1943 sxp->sx_length != ip->i_d.di_size ||
1944 sxp->sx_length != tip->i_d.di_size) {
Dave Chinner24513372014-06-25 14:58:08 +10001945 error = -EFAULT;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001946 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001947 }
1948
1949 trace_xfs_swap_extent_before(ip, 0);
1950 trace_xfs_swap_extent_before(tip, 1);
1951
1952 /* check inode formats now that data is flushed */
1953 error = xfs_swap_extents_check_format(ip, tip);
1954 if (error) {
1955 xfs_notice(mp,
1956 "%s: inode 0x%llx format is incompatible for exchanging.",
1957 __func__, ip->i_ino);
Dave Chinner4ef897a2014-08-04 13:44:08 +10001958 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001959 }
1960
1961 /*
1962	 * Compare the current change & modify times with those
1963	 * passed in. If they differ, we abort this swap.
1964	 * This is the mechanism used to assure the calling
1965	 * process that the file was not changed out from
1966 * under it.
1967 */
1968 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1969 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1970 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1971 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
Dave Chinner24513372014-06-25 14:58:08 +10001972 error = -EBUSY;
Dave Chinner81217682014-08-04 13:29:32 +10001973 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001974 }
Dave Chinnera133d952013-08-12 20:49:48 +10001975
Dave Chinner21b5c972013-08-30 10:23:44 +10001976 /*
Dave Chinner21b5c972013-08-30 10:23:44 +10001977 * Note the trickiness in setting the log flags - we set the owner log
1978 * flag on the opposite inode (i.e. the inode we are setting the new
1979 * owner to be) because once we swap the forks and log that, log
1980 * recovery is going to see the fork as owned by the swapped inode,
1981 * not the pre-swapped inodes.
1982 */
1983 src_log_flags = XFS_ILOG_CORE;
1984 target_log_flags = XFS_ILOG_CORE;
Dave Chinner21b5c972013-08-30 10:23:44 +10001985
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001986 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1987 error = xfs_swap_extent_rmap(&tp, ip, tip);
1988 else
1989 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1990 &target_log_flags);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001991 if (error)
1992 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001993
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001994 /* Do we have to swap reflink flags? */
1995 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1996 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1997 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1998 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1999 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2000 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2001 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
Darrick J. Wong52bfcdd2017-09-18 09:41:18 -07002002 }
2003
2004 /* Swap the cow forks. */
2005 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2006 xfs_extnum_t extnum;
2007
2008 ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2009 ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2010
2011 extnum = ip->i_cnextents;
2012 ip->i_cnextents = tip->i_cnextents;
2013 tip->i_cnextents = extnum;
2014
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002015 cowfp = ip->i_cowfp;
2016 ip->i_cowfp = tip->i_cowfp;
2017 tip->i_cowfp = cowfp;
Darrick J. Wong52bfcdd2017-09-18 09:41:18 -07002018
2019 if (ip->i_cowfp && ip->i_cnextents)
2020 xfs_inode_set_cowblocks_tag(ip);
2021 else
2022 xfs_inode_clear_cowblocks_tag(ip);
2023 if (tip->i_cowfp && tip->i_cnextents)
2024 xfs_inode_set_cowblocks_tag(tip);
2025 else
2026 xfs_inode_clear_cowblocks_tag(tip);
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002027 }
2028
Dave Chinnera133d952013-08-12 20:49:48 +10002029 xfs_trans_log_inode(tp, ip, src_log_flags);
2030 xfs_trans_log_inode(tp, tip, target_log_flags);
2031
2032 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07002033 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2034 * have inode number owner values in the bmbt blocks that still refer to
2035 * the old inode. Scan each bmbt to fix up the owner values with the
2036 * inode number of the current inode.
2037 */
2038 if (src_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002039 error = xfs_swap_change_owner(&tp, ip, tip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002040 if (error)
2041 goto out_trans_cancel;
2042 }
2043 if (target_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002044 error = xfs_swap_change_owner(&tp, tip, ip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002045 if (error)
2046 goto out_trans_cancel;
2047 }
2048
2049 /*
Dave Chinnera133d952013-08-12 20:49:48 +10002050 * If this is a synchronous mount, make sure that the
2051 * transaction goes to disk before returning to the user.
2052 */
2053 if (mp->m_flags & XFS_MOUNT_WSYNC)
2054 xfs_trans_set_sync(tp);
2055
Christoph Hellwig70393312015-06-04 13:48:08 +10002056 error = xfs_trans_commit(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10002057
2058 trace_xfs_swap_extent_after(ip, 0);
2059 trace_xfs_swap_extent_after(tip, 1);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002060
Christoph Hellwig65523212016-11-30 14:33:25 +11002061out_unlock:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002062 xfs_iunlock(ip, lock_flags);
2063 xfs_iunlock(tip, lock_flags);
Christoph Hellwig65523212016-11-30 14:33:25 +11002064 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
Dave Chinnera133d952013-08-12 20:49:48 +10002065 return error;
2066
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002067out_trans_cancel:
2068 xfs_trans_cancel(tp);
Christoph Hellwig65523212016-11-30 14:33:25 +11002069 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002070}
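/*
 * Illustrative sketch (assumed caller, for orientation only): the swap above
 * is normally driven by the online defragmenter (xfs_fsr) through the
 * XFS_IOC_SWAPEXT ioctl, after it has copied the target file's data into a
 * freshly allocated temporary file, very roughly:
 *
 *	struct xfs_swapext sx = {
 *		.sx_version	= XFS_SX_VERSION,
 *		.sx_fdtarget	= target_fd,
 *		.sx_fdtmp	= tmp_fd,
 *		.sx_offset	= 0,
 *		.sx_length	= target_size,
 *		.sx_stat	= target_bstat,	// from an earlier bulkstat
 *	};
 *	if (ioctl(target_fd, XFS_IOC_SWAPEXT, &sx) < 0)
 *		perror("XFS_IOC_SWAPEXT");
 *
 * The sx_stat timestamps feed the ctime/mtime comparison above, so the
 * target must not change between the bulkstat and the ioctl or the swap is
 * rejected with -EBUSY.
 */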