/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_attr_sf.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_ifork_zone;

STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
void
xfs_validate_extents(
        xfs_ifork_t             *ifp,
        int                     nrecs,
        xfs_exntfmt_t           fmt)
{
        xfs_bmbt_irec_t         irec;
        xfs_bmbt_rec_host_t     rec;
        int                     i;

        for (i = 0; i < nrecs; i++) {
                xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
                rec.l0 = get_unaligned(&ep->l0);
                rec.l1 = get_unaligned(&ep->l1);
                xfs_bmbt_get_all(&rec, &irec);
                if (fmt == XFS_EXTFMT_NOSTATE)
                        ASSERT(irec.br_state == XFS_EXT_NORM);
        }
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */


/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means setting if_rdev to the proper value.  For files,
 * directories, and symlinks this means bringing in the in-line data
 * or extent pointers.  For a file in B-tree format, only the root is
 * immediately brought in-core.  The rest will be in-lined in
 * if_extents when it is first referenced (see xfs_iread_extents()).
 */
int
xfs_iformat_fork(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip)
{
        xfs_attr_shortform_t    *atp;
        int                     size;
        int                     error = 0;
        xfs_fsize_t             di_size;

        if (unlikely(be32_to_cpu(dip->di_nextents) +
                     be16_to_cpu(dip->di_anextents) >
                     be64_to_cpu(dip->di_nblocks))) {
                xfs_warn(ip->i_mount,
                        "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
                        (unsigned long long)ip->i_ino,
                        (int)(be32_to_cpu(dip->di_nextents) +
                              be16_to_cpu(dip->di_anextents)),
                        (unsigned long long)
                                be64_to_cpu(dip->di_nblocks));
                XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
                xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
                        (unsigned long long)ip->i_ino,
                        dip->di_forkoff);
                XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
                     !ip->i_mount->m_rtdev_targp)) {
                xfs_warn(ip->i_mount,
                        "corrupt dinode %Lu, has realtime flag set.",
                        ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
                                     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        if (unlikely(xfs_is_reflink_inode(ip) &&
            (VFS_I(ip)->i_mode & S_IFMT) != S_IFREG)) {
                xfs_warn(ip->i_mount,
                        "corrupt dinode %llu, wrong file type for reflink.",
                        ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
                                     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        if (unlikely(xfs_is_reflink_inode(ip) &&
            (ip->i_d.di_flags & XFS_DIFLAG_REALTIME))) {
                xfs_warn(ip->i_mount,
                        "corrupt dinode %llu, has reflink+realtime flag set.",
                        ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
                                     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
                        XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
                                             ip->i_mount, dip);
                        return -EFSCORRUPTED;
                }
                ip->i_d.di_size = 0;
                ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
                break;

        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
                switch (dip->di_format) {
                case XFS_DINODE_FMT_LOCAL:
                        /*
                         * no local regular files yet
                         */
                        if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
                                xfs_warn(ip->i_mount,
                        "corrupt inode %Lu (local format for regular file).",
                                        (unsigned long long) ip->i_ino);
                                XFS_CORRUPTION_ERROR("xfs_iformat(4)",
                                                     XFS_ERRLEVEL_LOW,
                                                     ip->i_mount, dip);
                                return -EFSCORRUPTED;
                        }

                        di_size = be64_to_cpu(dip->di_size);
                        if (unlikely(di_size < 0 ||
                                     di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
                                xfs_warn(ip->i_mount,
                        "corrupt inode %Lu (bad size %Ld for local inode).",
                                        (unsigned long long) ip->i_ino,
                                        (long long) di_size);
                                XFS_CORRUPTION_ERROR("xfs_iformat(5)",
                                                     XFS_ERRLEVEL_LOW,
                                                     ip->i_mount, dip);
                                return -EFSCORRUPTED;
                        }

                        size = (int)di_size;
                        error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
                        break;
                case XFS_DINODE_FMT_EXTENTS:
                        error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
                        break;
                case XFS_DINODE_FMT_BTREE:
                        error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
                        break;
                default:
                        XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return -EFSCORRUPTED;
                }
                break;

        default:
                XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
                return -EFSCORRUPTED;
        }
        if (error)
                return error;

        /* Check inline dir contents. */
        if (S_ISDIR(VFS_I(ip)->i_mode) &&
            dip->di_format == XFS_DINODE_FMT_LOCAL) {
                error = xfs_dir2_sf_verify(ip);
                if (error) {
                        xfs_idestroy_fork(ip, XFS_DATA_FORK);
                        return error;
                }
        }

        if (xfs_is_reflink_inode(ip)) {
                ASSERT(ip->i_cowfp == NULL);
                xfs_ifork_init_cow(ip);
        }

        if (!XFS_DFORK_Q(dip))
                return 0;

        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);

        switch (dip->di_aformat) {
        case XFS_DINODE_FMT_LOCAL:
                atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
                size = be16_to_cpu(atp->hdr.totsize);

                if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
                        xfs_warn(ip->i_mount,
                                "corrupt inode %Lu (bad attr fork size %Ld).",
                                (unsigned long long) ip->i_ino,
                                (long long) size);
                        XFS_CORRUPTION_ERROR("xfs_iformat(8)",
                                             XFS_ERRLEVEL_LOW,
                                             ip->i_mount, dip);
                        error = -EFSCORRUPTED;
                        break;
                }

                error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
                break;
        case XFS_DINODE_FMT_EXTENTS:
                error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
                break;
        case XFS_DINODE_FMT_BTREE:
                error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
                break;
        default:
                error = -EFSCORRUPTED;
                break;
        }
        if (error) {
                kmem_zone_free(xfs_ifork_zone, ip->i_afp);
                ip->i_afp = NULL;
                if (ip->i_cowfp)
                        kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
                ip->i_cowfp = NULL;
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
        }
        return error;
}

void
xfs_init_local_fork(
        struct xfs_inode        *ip,
        int                     whichfork,
        const void              *data,
        int                     size)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        int                     mem_size = size, real_size = 0;
        bool                    zero_terminate;

        /*
         * If we are using the local fork to store a symlink body we need to
         * zero-terminate it so that we can pass it back to the VFS directly.
         * Overallocate the in-memory fork by one for that and add a zero
         * to terminate it below.
         */
        zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
        if (zero_terminate)
                mem_size++;

        if (size == 0)
                ifp->if_u1.if_data = NULL;
        else if (mem_size <= sizeof(ifp->if_u2.if_inline_data))
                ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
        else {
                real_size = roundup(mem_size, 4);
                ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
        }

        if (size) {
                memcpy(ifp->if_u1.if_data, data, size);
                if (zero_terminate)
                        ifp->if_u1.if_data[size] = '\0';
        }

        ifp->if_bytes = size;
        ifp->if_real_bytes = real_size;
        ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
        ifp->if_flags |= XFS_IFINLINE;
}

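/*
 * Illustrative sketch, not part of the original source: a caller creating
 * an inline symlink can install the target string as a local-format data
 * fork with a single call, e.g.
 *
 *      xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
 *
 * where "target_path" and "pathlen" are hypothetical names for the symlink
 * body and its length.  Because of the zero-termination above, the VFS can
 * then use if_data directly as a C string.
 */
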
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
        xfs_inode_t     *ip,
        xfs_dinode_t    *dip,
        int             whichfork,
        int             size)
{
        /*
         * If the size is unreasonable, then something
         * is wrong and we just bail out rather than crash in
         * kmem_alloc() or memcpy() below.
         */
        if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
                xfs_warn(ip->i_mount,
        "corrupt inode %Lu (bad size %d for local fork, size = %d).",
                        (unsigned long long) ip->i_ino, size,
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
                XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return -EFSCORRUPTED;
        }

        xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
        return 0;
}

354/*
355 * The file consists of a set of extents all
356 * of which fit into the on-disk inode.
357 * If there are few enough extents to fit into
358 * the if_inline_ext, then copy them there.
359 * Otherwise allocate a buffer for them and copy
360 * them into it. Either way, set if_extents
361 * to point at the extents.
362 */
363STATIC int
364xfs_iformat_extents(
365 xfs_inode_t *ip,
366 xfs_dinode_t *dip,
367 int whichfork)
368{
369 xfs_bmbt_rec_t *dp;
370 xfs_ifork_t *ifp;
371 int nex;
372 int size;
373 int i;
374
375 ifp = XFS_IFORK_PTR(ip, whichfork);
376 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
377 size = nex * (uint)sizeof(xfs_bmbt_rec_t);
378
379 /*
380 * If the number of extents is unreasonable, then something
381 * is wrong and we just bail out rather than crash in
382 * kmem_alloc() or memcpy() below.
383 */
384 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
385 xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
386 (unsigned long long) ip->i_ino, nex);
387 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
388 ip->i_mount, dip);
Dave Chinner24513372014-06-25 14:58:08 +1000389 return -EFSCORRUPTED;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000390 }
391
392 ifp->if_real_bytes = 0;
393 if (nex == 0)
394 ifp->if_u1.if_extents = NULL;
395 else if (nex <= XFS_INLINE_EXTS)
396 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
397 else
398 xfs_iext_add(ifp, 0, nex);
399
400 ifp->if_bytes = size;
401 if (size) {
402 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
403 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
404 for (i = 0; i < nex; i++, dp++) {
405 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
406 ep->l0 = get_unaligned_be64(&dp->l0);
407 ep->l1 = get_unaligned_be64(&dp->l1);
408 }
409 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
410 if (whichfork != XFS_DATA_FORK ||
411 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
412 if (unlikely(xfs_check_nostate_extents(
413 ifp, 0, nex))) {
414 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
415 XFS_ERRLEVEL_LOW,
416 ip->i_mount);
Dave Chinner24513372014-06-25 14:58:08 +1000417 return -EFSCORRUPTED;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000418 }
419 }
420 ifp->if_flags |= XFS_IFEXTENTS;
421 return 0;
422}
423
424/*
425 * The file has too many extents to fit into
426 * the inode, so they are in B-tree format.
427 * Allocate a buffer for the root of the B-tree
428 * and copy the root into it. The i_extents
429 * field will remain NULL until all of the
430 * extents are read in (when they are needed).
431 */
432STATIC int
433xfs_iformat_btree(
434 xfs_inode_t *ip,
435 xfs_dinode_t *dip,
436 int whichfork)
437{
438 struct xfs_mount *mp = ip->i_mount;
439 xfs_bmdr_block_t *dfp;
440 xfs_ifork_t *ifp;
441 /* REFERENCED */
442 int nrecs;
443 int size;
Darrick J. Wong4056a742017-02-02 15:13:59 -0800444 int level;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000445
446 ifp = XFS_IFORK_PTR(ip, whichfork);
447 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
448 size = XFS_BMAP_BROOT_SPACE(mp, dfp);
449 nrecs = be16_to_cpu(dfp->bb_numrecs);
Darrick J. Wong4056a742017-02-02 15:13:59 -0800450 level = be16_to_cpu(dfp->bb_level);
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000451
452 /*
453 * blow out if -- fork has less extents than can fit in
454 * fork (fork shouldn't be a btree format), root btree
455 * block has more records than can fit into the fork,
456 * or the number of extents is greater than the number of
457 * blocks.
458 */
459 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
460 XFS_IFORK_MAXEXT(ip, whichfork) ||
461 XFS_BMDR_SPACE_CALC(nrecs) >
462 XFS_DFORK_SIZE(dip, mp, whichfork) ||
Darrick J. Wong4056a742017-02-02 15:13:59 -0800463 XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
464 level == 0 || level > XFS_BTREE_MAXLEVELS) {
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000465 xfs_warn(mp, "corrupt inode %Lu (btree).",
466 (unsigned long long) ip->i_ino);
467 XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
468 mp, dip);
Dave Chinner24513372014-06-25 14:58:08 +1000469 return -EFSCORRUPTED;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000470 }
471
472 ifp->if_broot_bytes = size;
473 ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
474 ASSERT(ifp->if_broot != NULL);
475 /*
476 * Copy and convert from the on-disk structure
477 * to the in-memory structure.
478 */
479 xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
480 ifp->if_broot, size);
481 ifp->if_flags &= ~XFS_IFEXTENTS;
482 ifp->if_flags |= XFS_IFBROOT;
483
484 return 0;
485}
486
487/*
488 * Read in extents from a btree-format inode.
489 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
490 */
491int
492xfs_iread_extents(
493 xfs_trans_t *tp,
494 xfs_inode_t *ip,
495 int whichfork)
496{
497 int error;
498 xfs_ifork_t *ifp;
499 xfs_extnum_t nextents;
500
Christoph Hellwigeef334e2013-12-06 12:30:17 -0800501 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
502
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000503 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
504 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
505 ip->i_mount);
Dave Chinner24513372014-06-25 14:58:08 +1000506 return -EFSCORRUPTED;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000507 }
508 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
509 ifp = XFS_IFORK_PTR(ip, whichfork);
510
511 /*
512 * We know that the size is valid (it's checked in iformat_btree)
513 */
514 ifp->if_bytes = ifp->if_real_bytes = 0;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000515 xfs_iext_add(ifp, 0, nextents);
516 error = xfs_bmap_read_extents(tp, ip, whichfork);
517 if (error) {
518 xfs_iext_destroy(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000519 return error;
520 }
521 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
Darrick J. Wong0a6844a2017-02-02 15:13:57 -0800522 ifp->if_flags |= XFS_IFEXTENTS;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000523 return 0;
524}
525/*
526 * Reallocate the space for if_broot based on the number of records
527 * being added or deleted as indicated in rec_diff. Move the records
528 * and pointers in if_broot to fit the new size. When shrinking this
529 * will eliminate holes between the records and pointers created by
530 * the caller. When growing this will create holes to be filled in
531 * by the caller.
532 *
533 * The caller must not request to add more records than would fit in
534 * the on-disk inode root. If the if_broot is currently NULL, then
Zhi Yong Wuf6c27342013-08-07 10:11:04 +0000535 * if we are adding records, one will be allocated. The caller must also
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000536 * not request that the number of records go below zero, although
537 * it can go to zero.
538 *
539 * ip -- the inode whose if_broot area is changing
540 * ext_diff -- the change in the number of records, positive or negative,
541 * requested for the if_broot array.
542 */
543void
544xfs_iroot_realloc(
545 xfs_inode_t *ip,
546 int rec_diff,
547 int whichfork)
548{
549 struct xfs_mount *mp = ip->i_mount;
550 int cur_max;
551 xfs_ifork_t *ifp;
552 struct xfs_btree_block *new_broot;
553 int new_max;
554 size_t new_size;
555 char *np;
556 char *op;
557
558 /*
559 * Handle the degenerate case quietly.
560 */
561 if (rec_diff == 0) {
562 return;
563 }
564
565 ifp = XFS_IFORK_PTR(ip, whichfork);
566 if (rec_diff > 0) {
567 /*
568 * If there wasn't any memory allocated before, just
569 * allocate it now and get out.
570 */
571 if (ifp->if_broot_bytes == 0) {
572 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
573 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
574 ifp->if_broot_bytes = (int)new_size;
575 return;
576 }
577
578 /*
579 * If there is already an existing if_broot, then we need
580 * to realloc() it and shift the pointers to their new
581 * location. The records don't change location because
582 * they are kept butted up against the btree block header.
583 */
584 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
585 new_max = cur_max + rec_diff;
586 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
587 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000588 KM_SLEEP | KM_NOFS);
589 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
590 ifp->if_broot_bytes);
591 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
592 (int)new_size);
593 ifp->if_broot_bytes = (int)new_size;
594 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
595 XFS_IFORK_SIZE(ip, whichfork));
Christoph Hellwigd5cf09b2014-07-30 09:12:05 +1000596 memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000597 return;
598 }
599
600 /*
601 * rec_diff is less than 0. In this case, we are shrinking the
602 * if_broot buffer. It must already exist. If we go to zero
603 * records, just get rid of the root and clear the status bit.
604 */
605 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
606 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
607 new_max = cur_max + rec_diff;
608 ASSERT(new_max >= 0);
609 if (new_max > 0)
610 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
611 else
612 new_size = 0;
613 if (new_size > 0) {
614 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
615 /*
616 * First copy over the btree block header.
617 */
618 memcpy(new_broot, ifp->if_broot,
619 XFS_BMBT_BLOCK_LEN(ip->i_mount));
620 } else {
621 new_broot = NULL;
622 ifp->if_flags &= ~XFS_IFBROOT;
623 }
624
625 /*
626 * Only copy the records and pointers if there are any.
627 */
628 if (new_max > 0) {
629 /*
630 * First copy the records.
631 */
632 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
633 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
634 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
635
636 /*
637 * Then copy the pointers.
638 */
639 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
640 ifp->if_broot_bytes);
641 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
642 (int)new_size);
Christoph Hellwigd5cf09b2014-07-30 09:12:05 +1000643 memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000644 }
645 kmem_free(ifp->if_broot);
646 ifp->if_broot = new_broot;
647 ifp->if_broot_bytes = (int)new_size;
648 if (ifp->if_broot)
649 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
650 XFS_IFORK_SIZE(ip, whichfork));
651 return;
652}
653
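/*
 * Illustrative sketch, not from the original file: a caller that converts
 * an extent-format fork to btree format grows the incore root by one
 * record before initialising it, roughly:
 *
 *      xfs_iroot_realloc(ip, 1, whichfork);
 *      ifp->if_flags |= XFS_IFBROOT;
 *
 * after which the new record and pointer slots in if_broot are filled in
 * by the btree code.
 */
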
654
655/*
656 * This is called when the amount of space needed for if_data
657 * is increased or decreased. The change in size is indicated by
658 * the number of bytes that need to be added or deleted in the
659 * byte_diff parameter.
660 *
661 * If the amount of space needed has decreased below the size of the
662 * inline buffer, then switch to using the inline buffer. Otherwise,
663 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
664 * to what is needed.
665 *
666 * ip -- the inode whose if_data area is changing
667 * byte_diff -- the change in the number of bytes, positive or negative,
668 * requested for the if_data array.
669 */
670void
671xfs_idata_realloc(
672 xfs_inode_t *ip,
673 int byte_diff,
674 int whichfork)
675{
676 xfs_ifork_t *ifp;
677 int new_size;
678 int real_size;
679
680 if (byte_diff == 0) {
681 return;
682 }
683
684 ifp = XFS_IFORK_PTR(ip, whichfork);
685 new_size = (int)ifp->if_bytes + byte_diff;
686 ASSERT(new_size >= 0);
687
688 if (new_size == 0) {
689 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
690 kmem_free(ifp->if_u1.if_data);
691 }
692 ifp->if_u1.if_data = NULL;
693 real_size = 0;
694 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
695 /*
696 * If the valid extents/data can fit in if_inline_ext/data,
697 * copy them from the malloc'd vector and free it.
698 */
699 if (ifp->if_u1.if_data == NULL) {
700 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
701 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
702 ASSERT(ifp->if_real_bytes != 0);
703 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
704 new_size);
705 kmem_free(ifp->if_u1.if_data);
706 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
707 }
708 real_size = 0;
709 } else {
710 /*
711 * Stuck with malloc/realloc.
712 * For inline data, the underlying buffer must be
713 * a multiple of 4 bytes in size so that it can be
714 * logged and stay on word boundaries. We enforce
715 * that here.
716 */
717 real_size = roundup(new_size, 4);
718 if (ifp->if_u1.if_data == NULL) {
719 ASSERT(ifp->if_real_bytes == 0);
720 ifp->if_u1.if_data = kmem_alloc(real_size,
721 KM_SLEEP | KM_NOFS);
722 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
723 /*
724 * Only do the realloc if the underlying size
725 * is really changing.
726 */
727 if (ifp->if_real_bytes != real_size) {
728 ifp->if_u1.if_data =
729 kmem_realloc(ifp->if_u1.if_data,
730 real_size,
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000731 KM_SLEEP | KM_NOFS);
732 }
733 } else {
734 ASSERT(ifp->if_real_bytes == 0);
735 ifp->if_u1.if_data = kmem_alloc(real_size,
736 KM_SLEEP | KM_NOFS);
737 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
738 ifp->if_bytes);
739 }
740 }
741 ifp->if_real_bytes = real_size;
742 ifp->if_bytes = new_size;
743 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
744}
745
746void
747xfs_idestroy_fork(
748 xfs_inode_t *ip,
749 int whichfork)
750{
751 xfs_ifork_t *ifp;
752
753 ifp = XFS_IFORK_PTR(ip, whichfork);
754 if (ifp->if_broot != NULL) {
755 kmem_free(ifp->if_broot);
756 ifp->if_broot = NULL;
757 }
758
759 /*
760 * If the format is local, then we can't have an extents
761 * array so just look for an inline data array. If we're
762 * not local then we may or may not have an extents list,
763 * so check and free it up if we do.
764 */
765 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
766 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
767 (ifp->if_u1.if_data != NULL)) {
768 ASSERT(ifp->if_real_bytes != 0);
769 kmem_free(ifp->if_u1.if_data);
770 ifp->if_u1.if_data = NULL;
771 ifp->if_real_bytes = 0;
772 }
773 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
774 ((ifp->if_flags & XFS_IFEXTIREC) ||
775 ((ifp->if_u1.if_extents != NULL) &&
776 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
777 ASSERT(ifp->if_real_bytes != 0);
778 xfs_iext_destroy(ifp);
779 }
780 ASSERT(ifp->if_u1.if_extents == NULL ||
781 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
782 ASSERT(ifp->if_real_bytes == 0);
783 if (whichfork == XFS_ATTR_FORK) {
784 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
785 ip->i_afp = NULL;
Darrick J. Wong3993bae2016-10-03 09:11:32 -0700786 } else if (whichfork == XFS_COW_FORK) {
787 kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
788 ip->i_cowfp = NULL;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000789 }
790}
791
/* Count number of incore extents based on if_bytes */
xfs_extnum_t
xfs_iext_count(struct xfs_ifork *ifp)
{
        return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
}

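/*
 * Illustrative sketch, not part of the original source: with the extents
 * in memory (XFS_IFEXTENTS set), a reader typically walks the incore
 * records by index, e.g.
 *
 *      xfs_extnum_t    i;
 *      xfs_bmbt_irec_t irec;
 *
 *      for (i = 0; i < xfs_iext_count(ifp); i++)
 *              xfs_bmbt_get_all(xfs_iext_get_ext(ifp, i), &irec);
 */
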
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000799/*
Christoph Hellwigda776502013-12-13 11:34:04 +1100800 * Convert in-core extents to on-disk form
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000801 *
Christoph Hellwigda776502013-12-13 11:34:04 +1100802 * For either the data or attr fork in extent format, we need to endian convert
803 * the in-core extent as we place them into the on-disk inode.
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000804 *
Christoph Hellwigda776502013-12-13 11:34:04 +1100805 * In the case of the data fork, the in-core and on-disk fork sizes can be
806 * different due to delayed allocation extents. We only copy on-disk extents
807 * here, so callers must always use the physical fork size to determine the
808 * size of the buffer passed to this routine. We will return the size actually
809 * used.
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000810 */
811int
812xfs_iextents_copy(
813 xfs_inode_t *ip,
814 xfs_bmbt_rec_t *dp,
815 int whichfork)
816{
817 int copied;
818 int i;
819 xfs_ifork_t *ifp;
820 int nrecs;
821 xfs_fsblock_t start_block;
822
823 ifp = XFS_IFORK_PTR(ip, whichfork);
824 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
825 ASSERT(ifp->if_bytes > 0);
826
Eric Sandeenf380ee72017-01-09 16:38:36 +0100827 nrecs = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000828 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
829 ASSERT(nrecs > 0);
830
831 /*
832 * There are some delayed allocation extents in the
833 * inode, so copy the extents one at a time and skip
834 * the delayed ones. There must be at least one
835 * non-delayed extent.
836 */
837 copied = 0;
838 for (i = 0; i < nrecs; i++) {
839 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
840 start_block = xfs_bmbt_get_startblock(ep);
841 if (isnullstartblock(start_block)) {
842 /*
843 * It's a delayed allocation extent, so skip it.
844 */
845 continue;
846 }
847
848 /* Translate to on disk format */
Dave Chinnerc5c249b2013-08-12 20:49:43 +1000849 put_unaligned_be64(ep->l0, &dp->l0);
850 put_unaligned_be64(ep->l1, &dp->l1);
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000851 dp++;
852 copied++;
853 }
854 ASSERT(copied != 0);
855 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
856
857 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
858}
859
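/*
 * Illustrative sketch, not from the original file: the flush path sizes the
 * destination by the physical (on-disk) fork size rather than by if_bytes,
 * roughly:
 *
 *      char    *cp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
 *      int     copied = xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
 *                                         XFS_DATA_FORK);
 *
 *      ASSERT(copied <= XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
 */
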
860/*
861 * Each of the following cases stores data into the same region
862 * of the on-disk inode, so only one of them can be valid at
863 * any given time. While it is possible to have conflicting formats
864 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
865 * in EXTENTS format, this can only happen when the fork has
866 * changed formats after being modified but before being flushed.
867 * In these cases, the format always takes precedence, because the
868 * format indicates the current state of the fork.
869 */
Darrick J. Wongca659e082017-04-03 12:22:20 -0700870void
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000871xfs_iflush_fork(
872 xfs_inode_t *ip,
873 xfs_dinode_t *dip,
874 xfs_inode_log_item_t *iip,
Eric Sandeenfd9fdba2014-04-14 19:04:46 +1000875 int whichfork)
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000876{
877 char *cp;
878 xfs_ifork_t *ifp;
879 xfs_mount_t *mp;
880 static const short brootflag[2] =
881 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
882 static const short dataflag[2] =
883 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
884 static const short extflag[2] =
885 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
886
887 if (!iip)
Darrick J. Wongca659e082017-04-03 12:22:20 -0700888 return;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000889 ifp = XFS_IFORK_PTR(ip, whichfork);
890 /*
891 * This can happen if we gave up in iformat in an error path,
892 * for the attribute fork.
893 */
894 if (!ifp) {
895 ASSERT(whichfork == XFS_ATTR_FORK);
Darrick J. Wongca659e082017-04-03 12:22:20 -0700896 return;
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000897 }
898 cp = XFS_DFORK_PTR(dip, whichfork);
899 mp = ip->i_mount;
900 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
901 case XFS_DINODE_FMT_LOCAL:
902 if ((iip->ili_fields & dataflag[whichfork]) &&
903 (ifp->if_bytes > 0)) {
904 ASSERT(ifp->if_u1.if_data != NULL);
905 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
906 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
907 }
908 break;
909
910 case XFS_DINODE_FMT_EXTENTS:
911 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
912 !(iip->ili_fields & extflag[whichfork]));
913 if ((iip->ili_fields & extflag[whichfork]) &&
914 (ifp->if_bytes > 0)) {
915 ASSERT(xfs_iext_get_ext(ifp, 0));
916 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
917 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
918 whichfork);
919 }
920 break;
921
922 case XFS_DINODE_FMT_BTREE:
923 if ((iip->ili_fields & brootflag[whichfork]) &&
924 (ifp->if_broot_bytes > 0)) {
925 ASSERT(ifp->if_broot != NULL);
926 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
927 XFS_IFORK_SIZE(ip, whichfork));
928 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
929 (xfs_bmdr_block_t *)cp,
930 XFS_DFORK_SIZE(dip, mp, whichfork));
931 }
932 break;
933
934 case XFS_DINODE_FMT_DEV:
935 if (iip->ili_fields & XFS_ILOG_DEV) {
936 ASSERT(whichfork == XFS_DATA_FORK);
937 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
938 }
939 break;
940
941 case XFS_DINODE_FMT_UUID:
942 if (iip->ili_fields & XFS_ILOG_UUID) {
943 ASSERT(whichfork == XFS_DATA_FORK);
944 memcpy(XFS_DFORK_DPTR(dip),
945 &ip->i_df.if_u2.if_uuid,
946 sizeof(uuid_t));
947 }
948 break;
949
950 default:
951 ASSERT(0);
952 break;
953 }
954}
955
956/*
957 * Return a pointer to the extent record at file index idx.
958 */
959xfs_bmbt_rec_host_t *
960xfs_iext_get_ext(
961 xfs_ifork_t *ifp, /* inode fork pointer */
962 xfs_extnum_t idx) /* index of target extent */
963{
964 ASSERT(idx >= 0);
Eric Sandeenf380ee72017-01-09 16:38:36 +0100965 ASSERT(idx < xfs_iext_count(ifp));
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000966
967 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
968 return ifp->if_u1.if_ext_irec->er_extbuf;
969 } else if (ifp->if_flags & XFS_IFEXTIREC) {
970 xfs_ext_irec_t *erp; /* irec pointer */
971 int erp_idx = 0; /* irec index */
972 xfs_extnum_t page_idx = idx; /* ext index in target list */
973
974 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
975 return &erp->er_extbuf[page_idx];
976 } else if (ifp->if_bytes) {
977 return &ifp->if_u1.if_extents[idx];
978 } else {
979 return NULL;
980 }
981}
982
Darrick J. Wong3993bae2016-10-03 09:11:32 -0700983/* Convert bmap state flags to an inode fork. */
984struct xfs_ifork *
985xfs_iext_state_to_fork(
986 struct xfs_inode *ip,
987 int state)
988{
989 if (state & BMAP_COWFORK)
990 return ip->i_cowfp;
991 else if (state & BMAP_ATTRFORK)
992 return ip->i_afp;
993 return &ip->i_df;
994}
995
Dave Chinner5c4d97d2013-08-12 20:49:33 +1000996/*
997 * Insert new item(s) into the extent records for incore inode
998 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
999 */
1000void
1001xfs_iext_insert(
1002 xfs_inode_t *ip, /* incore inode pointer */
1003 xfs_extnum_t idx, /* starting index of new items */
1004 xfs_extnum_t count, /* number of inserted items */
1005 xfs_bmbt_irec_t *new, /* items to insert */
1006 int state) /* type of extent conversion */
1007{
Darrick J. Wong3993bae2016-10-03 09:11:32 -07001008 xfs_ifork_t *ifp = xfs_iext_state_to_fork(ip, state);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001009 xfs_extnum_t i; /* extent record index */
1010
1011 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
1012
1013 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1014 xfs_iext_add(ifp, idx, count);
1015 for (i = idx; i < idx + count; i++, new++)
1016 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
1017}
1018
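/*
 * Illustrative sketch, not part of the original source: the bmap code
 * inserts a single new record at a known index like this, where "idx",
 * "new" and "state" follow the parameter meanings above:
 *
 *      xfs_iext_insert(ip, idx, 1, &new, state);
 */
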
1019/*
1020 * This is called when the amount of space required for incore file
1021 * extents needs to be increased. The ext_diff parameter stores the
1022 * number of new extents being added and the idx parameter contains
1023 * the extent index where the new extents will be added. If the new
1024 * extents are being appended, then we just need to (re)allocate and
1025 * initialize the space. Otherwise, if the new extents are being
1026 * inserted into the middle of the existing entries, a bit more work
1027 * is required to make room for the new extents to be inserted. The
1028 * caller is responsible for filling in the new extent entries upon
1029 * return.
1030 */
1031void
1032xfs_iext_add(
1033 xfs_ifork_t *ifp, /* inode fork pointer */
1034 xfs_extnum_t idx, /* index to begin adding exts */
1035 int ext_diff) /* number of extents to add */
1036{
1037 int byte_diff; /* new bytes being added */
1038 int new_size; /* size of extents after adding */
1039 xfs_extnum_t nextents; /* number of extents in file */
1040
Eric Sandeenf380ee72017-01-09 16:38:36 +01001041 nextents = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001042 ASSERT((idx >= 0) && (idx <= nextents));
1043 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
1044 new_size = ifp->if_bytes + byte_diff;
1045 /*
1046 * If the new number of extents (nextents + ext_diff)
1047 * fits inside the inode, then continue to use the inline
1048 * extent buffer.
1049 */
1050 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
1051 if (idx < nextents) {
1052 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
1053 &ifp->if_u2.if_inline_ext[idx],
1054 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
1055 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
1056 }
1057 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
1058 ifp->if_real_bytes = 0;
1059 }
1060 /*
1061 * Otherwise use a linear (direct) extent list.
1062 * If the extents are currently inside the inode,
1063 * xfs_iext_realloc_direct will switch us from
1064 * inline to direct extent allocation mode.
1065 */
1066 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
1067 xfs_iext_realloc_direct(ifp, new_size);
1068 if (idx < nextents) {
1069 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
1070 &ifp->if_u1.if_extents[idx],
1071 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
1072 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
1073 }
1074 }
1075 /* Indirection array */
1076 else {
1077 xfs_ext_irec_t *erp;
1078 int erp_idx = 0;
1079 int page_idx = idx;
1080
1081 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
1082 if (ifp->if_flags & XFS_IFEXTIREC) {
1083 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
1084 } else {
1085 xfs_iext_irec_init(ifp);
1086 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1087 erp = ifp->if_u1.if_ext_irec;
1088 }
1089 /* Extents fit in target extent page */
1090 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
1091 if (page_idx < erp->er_extcount) {
1092 memmove(&erp->er_extbuf[page_idx + ext_diff],
1093 &erp->er_extbuf[page_idx],
1094 (erp->er_extcount - page_idx) *
1095 sizeof(xfs_bmbt_rec_t));
1096 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
1097 }
1098 erp->er_extcount += ext_diff;
1099 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
1100 }
1101 /* Insert a new extent page */
1102 else if (erp) {
1103 xfs_iext_add_indirect_multi(ifp,
1104 erp_idx, page_idx, ext_diff);
1105 }
1106 /*
1107 * If extent(s) are being appended to the last page in
1108 * the indirection array and the new extent(s) don't fit
1109 * in the page, then erp is NULL and erp_idx is set to
1110 * the next index needed in the indirection array.
1111 */
1112 else {
Jie Liubb86d212013-10-25 14:52:44 +08001113 uint count = ext_diff;
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001114
1115 while (count) {
1116 erp = xfs_iext_irec_new(ifp, erp_idx);
Jie Liubb86d212013-10-25 14:52:44 +08001117 erp->er_extcount = min(count, XFS_LINEAR_EXTS);
1118 count -= erp->er_extcount;
1119 if (count)
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001120 erp_idx++;
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001121 }
1122 }
1123 }
1124 ifp->if_bytes = new_size;
1125}
1126
1127/*
1128 * This is called when incore extents are being added to the indirection
1129 * array and the new extents do not fit in the target extent list. The
1130 * erp_idx parameter contains the irec index for the target extent list
1131 * in the indirection array, and the idx parameter contains the extent
1132 * index within the list. The number of extents being added is stored
1133 * in the count parameter.
1134 *
1135 * |-------| |-------|
1136 * | | | | idx - number of extents before idx
1137 * | idx | | count |
1138 * | | | | count - number of extents being inserted at idx
1139 * |-------| |-------|
1140 * | count | | nex2 | nex2 - number of extents after idx + count
1141 * |-------| |-------|
1142 */
1143void
1144xfs_iext_add_indirect_multi(
1145 xfs_ifork_t *ifp, /* inode fork pointer */
1146 int erp_idx, /* target extent irec index */
1147 xfs_extnum_t idx, /* index within target list */
1148 int count) /* new extents being added */
1149{
1150 int byte_diff; /* new bytes being added */
1151 xfs_ext_irec_t *erp; /* pointer to irec entry */
1152 xfs_extnum_t ext_diff; /* number of extents to add */
1153 xfs_extnum_t ext_cnt; /* new extents still needed */
1154 xfs_extnum_t nex2; /* extents after idx + count */
1155 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
1156 int nlists; /* number of irec's (lists) */
1157
1158 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1159 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1160 nex2 = erp->er_extcount - idx;
1161 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1162
1163 /*
	 * Save second part of target extent list
	 * (all extents past idx + count).
	 */
1166 if (nex2) {
1167 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
1168 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
1169 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
1170 erp->er_extcount -= nex2;
1171 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
1172 memset(&erp->er_extbuf[idx], 0, byte_diff);
1173 }
1174
1175 /*
1176 * Add the new extents to the end of the target
1177 * list, then allocate new irec record(s) and
1178 * extent buffer(s) as needed to store the rest
1179 * of the new extents.
1180 */
1181 ext_cnt = count;
1182 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
1183 if (ext_diff) {
1184 erp->er_extcount += ext_diff;
1185 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
1186 ext_cnt -= ext_diff;
1187 }
1188 while (ext_cnt) {
1189 erp_idx++;
1190 erp = xfs_iext_irec_new(ifp, erp_idx);
1191 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
1192 erp->er_extcount = ext_diff;
1193 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
1194 ext_cnt -= ext_diff;
1195 }
1196
1197 /* Add nex2 extents back to indirection array */
1198 if (nex2) {
1199 xfs_extnum_t ext_avail;
1200 int i;
1201
1202 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
1203 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
1204 i = 0;
1205 /*
1206 * If nex2 extents fit in the current page, append
1207 * nex2_ep after the new extents.
1208 */
1209 if (nex2 <= ext_avail) {
1210 i = erp->er_extcount;
1211 }
1212 /*
1213 * Otherwise, check if space is available in the
1214 * next page.
1215 */
1216 else if ((erp_idx < nlists - 1) &&
1217 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
1218 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
1219 erp_idx++;
1220 erp++;
1221 /* Create a hole for nex2 extents */
1222 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
1223 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
1224 }
1225 /*
1226 * Final choice, create a new extent page for
1227 * nex2 extents.
1228 */
1229 else {
1230 erp_idx++;
1231 erp = xfs_iext_irec_new(ifp, erp_idx);
1232 }
1233 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
1234 kmem_free(nex2_ep);
1235 erp->er_extcount += nex2;
1236 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
1237 }
1238}
1239
1240/*
1241 * This is called when the amount of space required for incore file
1242 * extents needs to be decreased. The ext_diff parameter stores the
1243 * number of extents to be removed and the idx parameter contains
1244 * the extent index where the extents will be removed from.
1245 *
1246 * If the amount of space needed has decreased below the linear
1247 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
1248 * extent array. Otherwise, use kmem_realloc() to adjust the
1249 * size to what is needed.
1250 */
1251void
1252xfs_iext_remove(
1253 xfs_inode_t *ip, /* incore inode pointer */
1254 xfs_extnum_t idx, /* index to begin removing exts */
1255 int ext_diff, /* number of extents to remove */
1256 int state) /* type of extent conversion */
1257{
Darrick J. Wong3993bae2016-10-03 09:11:32 -07001258 xfs_ifork_t *ifp = xfs_iext_state_to_fork(ip, state);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001259 xfs_extnum_t nextents; /* number of extents in file */
1260 int new_size; /* size of extents after removal */
1261
1262 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
1263
1264 ASSERT(ext_diff > 0);
Eric Sandeenf380ee72017-01-09 16:38:36 +01001265 nextents = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001266 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
1267
1268 if (new_size == 0) {
1269 xfs_iext_destroy(ifp);
1270 } else if (ifp->if_flags & XFS_IFEXTIREC) {
1271 xfs_iext_remove_indirect(ifp, idx, ext_diff);
1272 } else if (ifp->if_real_bytes) {
1273 xfs_iext_remove_direct(ifp, idx, ext_diff);
1274 } else {
1275 xfs_iext_remove_inline(ifp, idx, ext_diff);
1276 }
1277 ifp->if_bytes = new_size;
1278}
1279
1280/*
1281 * This removes ext_diff extents from the inline buffer, beginning
1282 * at extent index idx.
1283 */
1284void
1285xfs_iext_remove_inline(
1286 xfs_ifork_t *ifp, /* inode fork pointer */
1287 xfs_extnum_t idx, /* index to begin removing exts */
1288 int ext_diff) /* number of extents to remove */
1289{
1290 int nextents; /* number of extents in file */
1291
1292 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
1293 ASSERT(idx < XFS_INLINE_EXTS);
Eric Sandeenf380ee72017-01-09 16:38:36 +01001294 nextents = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001295 ASSERT(((nextents - ext_diff) > 0) &&
1296 (nextents - ext_diff) < XFS_INLINE_EXTS);
1297
1298 if (idx + ext_diff < nextents) {
1299 memmove(&ifp->if_u2.if_inline_ext[idx],
1300 &ifp->if_u2.if_inline_ext[idx + ext_diff],
1301 (nextents - (idx + ext_diff)) *
1302 sizeof(xfs_bmbt_rec_t));
1303 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
1304 0, ext_diff * sizeof(xfs_bmbt_rec_t));
1305 } else {
1306 memset(&ifp->if_u2.if_inline_ext[idx], 0,
1307 ext_diff * sizeof(xfs_bmbt_rec_t));
1308 }
1309}
1310
1311/*
1312 * This removes ext_diff extents from a linear (direct) extent list,
1313 * beginning at extent index idx. If the extents are being removed
1314 * from the end of the list (ie. truncate) then we just need to re-
1315 * allocate the list to remove the extra space. Otherwise, if the
1316 * extents are being removed from the middle of the existing extent
1317 * entries, then we first need to move the extent records beginning
1318 * at idx + ext_diff up in the list to overwrite the records being
1319 * removed, then remove the extra space via kmem_realloc.
1320 */
1321void
1322xfs_iext_remove_direct(
1323 xfs_ifork_t *ifp, /* inode fork pointer */
1324 xfs_extnum_t idx, /* index to begin removing exts */
1325 int ext_diff) /* number of extents to remove */
1326{
1327 xfs_extnum_t nextents; /* number of extents in file */
1328 int new_size; /* size of extents after removal */
1329
1330 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
1331 new_size = ifp->if_bytes -
1332 (ext_diff * sizeof(xfs_bmbt_rec_t));
Eric Sandeenf380ee72017-01-09 16:38:36 +01001333 nextents = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001334
1335 if (new_size == 0) {
1336 xfs_iext_destroy(ifp);
1337 return;
1338 }
1339 /* Move extents up in the list (if needed) */
1340 if (idx + ext_diff < nextents) {
1341 memmove(&ifp->if_u1.if_extents[idx],
1342 &ifp->if_u1.if_extents[idx + ext_diff],
1343 (nextents - (idx + ext_diff)) *
1344 sizeof(xfs_bmbt_rec_t));
1345 }
1346 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
1347 0, ext_diff * sizeof(xfs_bmbt_rec_t));
1348 /*
1349 * Reallocate the direct extent list. If the extents
1350 * will fit inside the inode then xfs_iext_realloc_direct
1351 * will switch from direct to inline extent allocation
1352 * mode for us.
1353 */
1354 xfs_iext_realloc_direct(ifp, new_size);
1355 ifp->if_bytes = new_size;
1356}
1357
1358/*
1359 * This is called when incore extents are being removed from the
1360 * indirection array and the extents being removed span multiple extent
1361 * buffers. The idx parameter contains the file extent index where we
1362 * want to begin removing extents, and the count parameter contains
1363 * how many extents need to be removed.
1364 *
1365 * |-------| |-------|
1366 * | nex1 | | | nex1 - number of extents before idx
1367 * |-------| | count |
1368 * | | | | count - number of extents being removed at idx
1369 * | count | |-------|
1370 * | | | nex2 | nex2 - number of extents after idx + count
1371 * |-------| |-------|
1372 */
1373void
1374xfs_iext_remove_indirect(
1375 xfs_ifork_t *ifp, /* inode fork pointer */
1376 xfs_extnum_t idx, /* index to begin removing extents */
1377 int count) /* number of extents to remove */
1378{
1379 xfs_ext_irec_t *erp; /* indirection array pointer */
1380 int erp_idx = 0; /* indirection array index */
1381 xfs_extnum_t ext_cnt; /* extents left to remove */
1382 xfs_extnum_t ext_diff; /* extents to remove in current list */
1383 xfs_extnum_t nex1; /* number of extents before idx */
1384 xfs_extnum_t nex2; /* extents after idx + count */
1385 int page_idx = idx; /* index in target extent list */
1386
1387 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1388 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
1389 ASSERT(erp != NULL);
1390 nex1 = page_idx;
1391 ext_cnt = count;
1392 while (ext_cnt) {
1393 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
1394 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
1395 /*
1396 * Check for deletion of entire list;
1397 * xfs_iext_irec_remove() updates extent offsets.
1398 */
1399 if (ext_diff == erp->er_extcount) {
1400 xfs_iext_irec_remove(ifp, erp_idx);
1401 ext_cnt -= ext_diff;
1402 nex1 = 0;
1403 if (ext_cnt) {
1404 ASSERT(erp_idx < ifp->if_real_bytes /
1405 XFS_IEXT_BUFSZ);
1406 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1407 nex1 = 0;
1408 continue;
1409 } else {
1410 break;
1411 }
1412 }
1413 /* Move extents up (if needed) */
1414 if (nex2) {
1415 memmove(&erp->er_extbuf[nex1],
1416 &erp->er_extbuf[nex1 + ext_diff],
1417 nex2 * sizeof(xfs_bmbt_rec_t));
1418 }
1419 /* Zero out rest of page */
1420 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
1421 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
1422 /* Update remaining counters */
1423 erp->er_extcount -= ext_diff;
1424 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
1425 ext_cnt -= ext_diff;
1426 nex1 = 0;
1427 erp_idx++;
1428 erp++;
1429 }
1430 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
1431 xfs_iext_irec_compact(ifp);
1432}
1433
1434/*
1435 * Create, destroy, or resize a linear (direct) block of extents.
1436 */
1437void
1438xfs_iext_realloc_direct(
1439 xfs_ifork_t *ifp, /* inode fork pointer */
Jie Liu17ec81c2013-09-22 16:25:15 +08001440 int new_size) /* new size of extents after adding */
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001441{
1442 int rnew_size; /* real new size of extents */
1443
1444 rnew_size = new_size;
1445
1446 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
1447 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
1448 (new_size != ifp->if_real_bytes)));
1449
1450 /* Free extent records */
1451 if (new_size == 0) {
1452 xfs_iext_destroy(ifp);
1453 }
1454 /* Resize direct extent list and zero any new bytes */
1455 else if (ifp->if_real_bytes) {
1456 /* Check if extents will fit inside the inode */
1457 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
1458 xfs_iext_direct_to_inline(ifp, new_size /
1459 (uint)sizeof(xfs_bmbt_rec_t));
1460 ifp->if_bytes = new_size;
1461 return;
1462 }
1463 if (!is_power_of_2(new_size)){
1464 rnew_size = roundup_pow_of_two(new_size);
1465 }
1466 if (rnew_size != ifp->if_real_bytes) {
1467 ifp->if_u1.if_extents =
1468 kmem_realloc(ifp->if_u1.if_extents,
Christoph Hellwig664b60f2016-04-06 09:47:01 +10001469 rnew_size, KM_NOFS);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001470 }
1471 if (rnew_size > ifp->if_real_bytes) {
1472 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
1473 (uint)sizeof(xfs_bmbt_rec_t)], 0,
1474 rnew_size - ifp->if_real_bytes);
1475 }
1476 }
Jie Liu17ec81c2013-09-22 16:25:15 +08001477 /* Switch from the inline extent buffer to a direct extent list */
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001478 else {
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001479 if (!is_power_of_2(new_size)) {
1480 rnew_size = roundup_pow_of_two(new_size);
1481 }
1482 xfs_iext_inline_to_direct(ifp, rnew_size);
1483 }
1484 ifp->if_real_bytes = rnew_size;
1485 ifp->if_bytes = new_size;
1486}
1487
1488/*
1489 * Switch from linear (direct) extent records to inline buffer.
1490 */
1491void
1492xfs_iext_direct_to_inline(
1493 xfs_ifork_t *ifp, /* inode fork pointer */
1494 xfs_extnum_t nextents) /* number of extents in file */
1495{
1496 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1497 ASSERT(nextents <= XFS_INLINE_EXTS);
1498 /*
1499 * The inline buffer was zeroed when we switched
1500 * from inline to direct extent allocation mode,
1501 * so we don't need to clear it here.
1502 */
1503 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
1504 nextents * sizeof(xfs_bmbt_rec_t));
1505 kmem_free(ifp->if_u1.if_extents);
1506 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
1507 ifp->if_real_bytes = 0;
1508}
1509
1510/*
1511 * Switch from inline buffer to linear (direct) extent records.
1512 * new_size should already be rounded up to the next power of 2
1513 * by the caller (when appropriate), so use new_size as it is.
1514 * However, since new_size may be rounded up, we can't update
1515 * if_bytes here. It is the caller's responsibility to update
1516 * if_bytes upon return.
1517 */
1518void
1519xfs_iext_inline_to_direct(
1520 xfs_ifork_t *ifp, /* inode fork pointer */
1521 int new_size) /* number of extents in file */
1522{
1523 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
1524 memset(ifp->if_u1.if_extents, 0, new_size);
1525 if (ifp->if_bytes) {
1526 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
1527 ifp->if_bytes);
1528 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
1529 sizeof(xfs_bmbt_rec_t));
1530 }
1531 ifp->if_real_bytes = new_size;
1532}
1533
1534/*
1535 * Resize an extent indirection array to new_size bytes.
1536 */
1537STATIC void
1538xfs_iext_realloc_indirect(
1539 xfs_ifork_t *ifp, /* inode fork pointer */
1540 int new_size) /* new indirection array size */
1541{
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001542 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001543 ASSERT(ifp->if_real_bytes);
Darrick J. Wongae04a8c2017-08-31 15:11:06 -07001544 ASSERT((new_size >= 0) &&
1545 (new_size != ((ifp->if_real_bytes / XFS_IEXT_BUFSZ) *
1546 sizeof(xfs_ext_irec_t))));
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001547 if (new_size == 0) {
1548 xfs_iext_destroy(ifp);
1549 } else {
Christoph Hellwig664b60f2016-04-06 09:47:01 +10001550 ifp->if_u1.if_ext_irec =
1551 kmem_realloc(ifp->if_u1.if_ext_irec, new_size, KM_NOFS);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001552 }
1553}
1554
1555/*
1556 * Switch from indirection array to linear (direct) extent allocations.
1557 */
1558STATIC void
1559xfs_iext_indirect_to_direct(
1560 xfs_ifork_t *ifp) /* inode fork pointer */
1561{
1562 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1563 xfs_extnum_t nextents; /* number of extents in file */
1564 int size; /* size of file extents */
1565
1566 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
Eric Sandeenf380ee72017-01-09 16:38:36 +01001567 nextents = xfs_iext_count(ifp);
Dave Chinner5c4d97d2013-08-12 20:49:33 +10001568 ASSERT(nextents <= XFS_LINEAR_EXTS);
1569 size = nextents * sizeof(xfs_bmbt_rec_t);
1570
1571 xfs_iext_irec_compact_pages(ifp);
1572 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
1573
1574 ep = ifp->if_u1.if_ext_irec->er_extbuf;
1575 kmem_free(ifp->if_u1.if_ext_irec);
1576 ifp->if_flags &= ~XFS_IFEXTIREC;
1577 ifp->if_u1.if_extents = ep;
1578 ifp->if_bytes = size;
1579 if (nextents < XFS_LINEAR_EXTS) {
1580 xfs_iext_realloc_direct(ifp, size);
1581 }
1582}
1583
1584/*
Alex Lyakas32b43ab2016-05-18 14:01:52 +10001585 * Remove all records from the indirection array.
1586 */
1587STATIC void
1588xfs_iext_irec_remove_all(
1589 struct xfs_ifork *ifp)
1590{
1591 int nlists;
1592 int i;
1593
1594 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1595 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1596 for (i = 0; i < nlists; i++)
1597 kmem_free(ifp->if_u1.if_ext_irec[i].er_extbuf);
1598 kmem_free(ifp->if_u1.if_ext_irec);
1599 ifp->if_flags &= ~XFS_IFEXTIREC;
1600}
1601
1602/*
1603 * Free incore file extents.
1604 */
1605void
1606xfs_iext_destroy(
1607 xfs_ifork_t *ifp) /* inode fork pointer */
1608{
1609 if (ifp->if_flags & XFS_IFEXTIREC) {
1610		xfs_iext_irec_remove_all(ifp);
1611	} else if (ifp->if_real_bytes) {
1612 kmem_free(ifp->if_u1.if_extents);
1613 } else if (ifp->if_bytes) {
1614 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
1615 sizeof(xfs_bmbt_rec_t));
1616 }
1617 ifp->if_u1.if_extents = NULL;
1618 ifp->if_real_bytes = 0;
1619 ifp->if_bytes = 0;
1620}
1621
1622/*
1623 * Return a pointer to the extent record for file system block bno.
1624 */
1625xfs_bmbt_rec_host_t * /* pointer to found extent record */
1626xfs_iext_bno_to_ext(
1627 xfs_ifork_t *ifp, /* inode fork pointer */
1628 xfs_fileoff_t bno, /* block number to search for */
1629 xfs_extnum_t *idxp) /* index of target extent */
1630{
1631 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
1632 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
1633 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
1634 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
1635 int high; /* upper boundary in search */
1636 xfs_extnum_t idx = 0; /* index of target extent */
1637 int low; /* lower boundary in search */
1638 xfs_extnum_t nextents; /* number of file extents */
1639 xfs_fileoff_t startoff = 0; /* start offset of extent */
1640
1641	nextents = xfs_iext_count(ifp);
1642	if (nextents == 0) {
1643 *idxp = 0;
1644 return NULL;
1645 }
1646 low = 0;
1647 if (ifp->if_flags & XFS_IFEXTIREC) {
1648 /* Find target extent list */
1649 int erp_idx = 0;
1650 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
1651 base = erp->er_extbuf;
1652 high = erp->er_extcount - 1;
1653 } else {
1654 base = ifp->if_u1.if_extents;
1655 high = nextents - 1;
1656 }
1657 /* Binary search extent records */
1658 while (low <= high) {
1659 idx = (low + high) >> 1;
1660 ep = base + idx;
1661 startoff = xfs_bmbt_get_startoff(ep);
1662 blockcount = xfs_bmbt_get_blockcount(ep);
1663 if (bno < startoff) {
1664 high = idx - 1;
1665 } else if (bno >= startoff + blockcount) {
1666 low = idx + 1;
1667 } else {
1668 /* Convert back to file-based extent index */
1669 if (ifp->if_flags & XFS_IFEXTIREC) {
1670 idx += erp->er_extoff;
1671 }
1672 *idxp = idx;
1673 return ep;
1674 }
1675 }
1676 /* Convert back to file-based extent index */
1677 if (ifp->if_flags & XFS_IFEXTIREC) {
1678 idx += erp->er_extoff;
1679 }
1680 if (bno >= startoff + blockcount) {
1681 if (++idx == nextents) {
1682 ep = NULL;
1683 } else {
1684 ep = xfs_iext_get_ext(ifp, idx);
1685 }
1686 }
1687 *idxp = idx;
1688 return ep;
1689}
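/*
 * A standalone sketch of the search above, assuming a plain sorted array of
 * simplified extents; the real code additionally picks the right indirection
 * page first and converts the result back to a file-wide index.  All ex2_*
 * names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct ex2_extent {
	unsigned long long	startoff;	/* first file block of the extent */
	unsigned long long	blockcount;	/* number of blocks in the extent */
};

/*
 * Find the extent covering bno, or the first extent starting after bno.
 * Returns true and sets *idxp if such an extent exists, false otherwise.
 */
static bool ex2_bno_to_ext(const struct ex2_extent *ext, size_t nextents,
			   unsigned long long bno, size_t *idxp)
{
	size_t low = 0, high, idx = 0;

	if (nextents == 0)
		return false;
	high = nextents - 1;
	while (low <= high) {
		idx = (low + high) >> 1;
		if (bno < ext[idx].startoff) {
			if (idx == 0)
				break;			/* nothing below; extent 0 is "next" */
			high = idx - 1;
		} else if (bno >= ext[idx].startoff + ext[idx].blockcount) {
			low = idx + 1;
		} else {
			*idxp = idx;			/* bno lies inside this extent */
			return true;
		}
	}
	/* bno is in a hole or beyond EOF: report the next extent, if any. */
	if (bno >= ext[idx].startoff + ext[idx].blockcount)
		idx++;
	if (idx >= nextents)
		return false;
	*idxp = idx;
	return true;
}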
1690
1691/*
1692 * Return a pointer to the indirection array entry containing the
1693 * extent record for filesystem block bno. Store the index of the
1694 * target irec in *erp_idxp.
1695 */
1696xfs_ext_irec_t * /* pointer to found extent record */
1697xfs_iext_bno_to_irec(
1698 xfs_ifork_t *ifp, /* inode fork pointer */
1699 xfs_fileoff_t bno, /* block number to search for */
1700 int *erp_idxp) /* irec index of target ext list */
1701{
1702 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
1703 xfs_ext_irec_t *erp_next; /* next indirection array entry */
1704 int erp_idx; /* indirection array index */
1705 int nlists; /* number of extent irec's (lists) */
1706 int high; /* binary search upper limit */
1707 int low; /* binary search lower limit */
1708
1709 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1710 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1711 erp_idx = 0;
1712 low = 0;
1713 high = nlists - 1;
1714 while (low <= high) {
1715 erp_idx = (low + high) >> 1;
1716 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1717 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
1718 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
1719 high = erp_idx - 1;
1720 } else if (erp_next && bno >=
1721 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
1722 low = erp_idx + 1;
1723 } else {
1724 break;
1725 }
1726 }
1727 *erp_idxp = erp_idx;
1728 return erp;
1729}
1730
1731/*
1732 * Return a pointer to the indirection array entry containing the
1733 * extent record at file extent index *idxp. Store the index of the
1734 * target irec in *erp_idxp and store the page index of the target
1735 * extent record in *idxp.
1736 */
1737xfs_ext_irec_t *
1738xfs_iext_idx_to_irec(
1739 xfs_ifork_t *ifp, /* inode fork pointer */
1740 xfs_extnum_t *idxp, /* extent index (file -> page) */
1741 int *erp_idxp, /* pointer to target irec */
1742 int realloc) /* new bytes were just added */
1743{
1744 xfs_ext_irec_t *prev; /* pointer to previous irec */
1745 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
1746 int erp_idx; /* indirection array index */
1747 int nlists; /* number of irec's (ex lists) */
1748 int high; /* binary search upper limit */
1749 int low; /* binary search lower limit */
1750 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
1751
1752 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1753 ASSERT(page_idx >= 0);
1754	ASSERT(page_idx <= xfs_iext_count(ifp));
1755	ASSERT(page_idx < xfs_iext_count(ifp) || realloc);
1756
1757 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1758 erp_idx = 0;
1759 low = 0;
1760 high = nlists - 1;
1761
1762 /* Binary search extent irec's */
1763 while (low <= high) {
1764 erp_idx = (low + high) >> 1;
1765 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1766 prev = erp_idx > 0 ? erp - 1 : NULL;
1767 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
1768 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
1769 high = erp_idx - 1;
1770 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
1771 (page_idx == erp->er_extoff + erp->er_extcount &&
1772 !realloc)) {
1773 low = erp_idx + 1;
1774 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
1775 erp->er_extcount == XFS_LINEAR_EXTS) {
1776 ASSERT(realloc);
1777 page_idx = 0;
1778 erp_idx++;
1779 erp = erp_idx < nlists ? erp + 1 : NULL;
1780 break;
1781 } else {
1782 page_idx -= erp->er_extoff;
1783 break;
1784 }
1785 }
1786 *idxp = page_idx;
1787 *erp_idxp = erp_idx;
1788	return erp;
1789}
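/*
 * Worked illustration of the er_extoff/er_extcount bookkeeping the function
 * above relies on: with pages {extoff=0, extcount=256} and {extoff=256,
 * extcount=100}, file-wide extent index 300 lands in page 1 at offset 44.
 * The linear walk below (hypothetical ex3_* names) finds the same mapping
 * that the kernel code computes with a binary search plus the extra cases
 * needed when a page is about to grow.
 */
#include <stdbool.h>
#include <stddef.h>

struct ex3_irec {
	size_t	extoff;		/* file-wide index of the first extent in this page */
	size_t	extcount;	/* extents currently stored in this page */
};

static bool ex3_idx_to_irec(const struct ex3_irec *irec, size_t nlists,
			    size_t file_idx, size_t *page, size_t *page_idx)
{
	size_t i;

	for (i = 0; i < nlists; i++) {
		if (file_idx < irec[i].extoff + irec[i].extcount) {
			*page = i;
			*page_idx = file_idx - irec[i].extoff;
			return true;
		}
	}
	return false;		/* file_idx is past the last stored extent */
}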
1790
1791/*
1792 * Allocate and initialize an indirection array once the space needed
1793 * for incore extents increases above XFS_IEXT_BUFSZ.
1794 */
1795void
1796xfs_iext_irec_init(
1797 xfs_ifork_t *ifp) /* inode fork pointer */
1798{
1799 xfs_ext_irec_t *erp; /* indirection array pointer */
1800 xfs_extnum_t nextents; /* number of extents in file */
1801
1802 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
1803	nextents = xfs_iext_count(ifp);
1804	ASSERT(nextents <= XFS_LINEAR_EXTS);
1805
1806 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
1807
1808 if (nextents == 0) {
1809 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
1810 } else if (!ifp->if_real_bytes) {
1811 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
1812 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
1813 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
1814 }
1815 erp->er_extbuf = ifp->if_u1.if_extents;
1816 erp->er_extcount = nextents;
1817 erp->er_extoff = 0;
1818
1819 ifp->if_flags |= XFS_IFEXTIREC;
1820 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
1821 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
1822 ifp->if_u1.if_ext_irec = erp;
1823
1824 return;
1825}
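/*
 * A rough sketch of the switch-over performed above, assuming the usual
 * layout (4k pages of 16-byte records, i.e. 256 extents per page): the
 * existing direct buffer is not copied, it simply becomes page 0 of a
 * one-entry indirection array.  Resizing the direct buffer up to a full
 * page and the empty-fork case are omitted; ex4_* names are hypothetical.
 */
#include <stdlib.h>

struct ex4_irec {
	void	*extbuf;	/* one page of extent records */
	size_t	extcount;	/* records held in this page */
	size_t	extoff;		/* file-wide index of the first record */
};

struct ex4_fork {
	void		*extents;	/* direct buffer (before the switch) */
	struct ex4_irec	*irec;		/* indirection array (after the switch) */
	size_t		nextents;	/* live extent count */
};

static void ex4_irec_init(struct ex4_fork *ifp)
{
	struct ex4_irec *erp = malloc(sizeof(*erp));

	erp->extbuf = ifp->extents;	/* reuse the existing page, don't copy */
	erp->extcount = ifp->nextents;
	erp->extoff = 0;		/* page 0 starts at file-wide index 0 */
	ifp->irec = erp;
}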
1826
1827/*
1828 * Allocate and initialize a new entry in the indirection array.
1829 */
1830xfs_ext_irec_t *
1831xfs_iext_irec_new(
1832 xfs_ifork_t *ifp, /* inode fork pointer */
1833 int erp_idx) /* index for new irec */
1834{
1835 xfs_ext_irec_t *erp; /* indirection array pointer */
1836 int i; /* loop counter */
1837 int nlists; /* number of irec's (ex lists) */
1838
1839 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1840 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1841
1842 /* Resize indirection array */
1843 xfs_iext_realloc_indirect(ifp, ++nlists *
1844 sizeof(xfs_ext_irec_t));
1845 /*
1846 * Move records down in the array so the
1847 * new page can use erp_idx.
1848 */
1849 erp = ifp->if_u1.if_ext_irec;
1850 for (i = nlists - 1; i > erp_idx; i--) {
1851 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
1852 }
1853 ASSERT(i == erp_idx);
1854
1855 /* Initialize new extent record */
1856 erp = ifp->if_u1.if_ext_irec;
1857 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
1858 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
1859 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
1860 erp[erp_idx].er_extcount = 0;
1861 erp[erp_idx].er_extoff = erp_idx > 0 ?
1862 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
1863 return (&erp[erp_idx]);
1864}
1865
1866/*
1867 * Remove a record from the indirection array.
1868 */
1869void
1870xfs_iext_irec_remove(
1871 xfs_ifork_t *ifp, /* inode fork pointer */
1872 int erp_idx) /* irec index to remove */
1873{
1874 xfs_ext_irec_t *erp; /* indirection array pointer */
1875 int i; /* loop counter */
1876 int nlists; /* number of irec's (ex lists) */
1877
1878 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1879 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1880 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1881 if (erp->er_extbuf) {
1882 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
1883 -erp->er_extcount);
1884 kmem_free(erp->er_extbuf);
1885 }
1886 /* Compact extent records */
1887 erp = ifp->if_u1.if_ext_irec;
1888 for (i = erp_idx; i < nlists - 1; i++) {
1889 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
1890 }
1891 /*
1892 * Manually free the last extent record from the indirection
1893 * array. A call to xfs_iext_realloc_indirect() with a size
1894 * of zero would result in a call to xfs_iext_destroy() which
1895 * would in turn call this function again, creating a nasty
1896 * infinite loop.
1897 */
1898 if (--nlists) {
1899 xfs_iext_realloc_indirect(ifp,
1900 nlists * sizeof(xfs_ext_irec_t));
1901 } else {
1902 kmem_free(ifp->if_u1.if_ext_irec);
1903 }
1904 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
1905}
1906
1907/*
1908 * This is called to clean up large amounts of unused memory allocated
1909 * by the indirection array. Before compacting anything though, verify
1910 * that the indirection array is still needed and switch back to the
1911 * linear extent list (or even the inline buffer) if possible. The
1912 * compaction policy is as follows:
1913 *
1914 * Full Compaction: Extents fit into a single page (or inline buffer)
1915 * Partial Compaction: Extents occupy less than 50% of allocated space
1916 * No Compaction: Extents occupy at least 50% of allocated space
1917 */
1918void
1919xfs_iext_irec_compact(
1920 xfs_ifork_t *ifp) /* inode fork pointer */
1921{
1922 xfs_extnum_t nextents; /* number of extents in file */
1923 int nlists; /* number of irec's (ex lists) */
1924
1925 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1926 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1927	nextents = xfs_iext_count(ifp);
1928
1929 if (nextents == 0) {
1930 xfs_iext_destroy(ifp);
1931 } else if (nextents <= XFS_INLINE_EXTS) {
1932 xfs_iext_indirect_to_direct(ifp);
1933 xfs_iext_direct_to_inline(ifp, nextents);
1934 } else if (nextents <= XFS_LINEAR_EXTS) {
1935 xfs_iext_indirect_to_direct(ifp);
1936 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
1937 xfs_iext_irec_compact_pages(ifp);
1938 }
1939}
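/*
 * The policy above, restated as a pure decision function.  The constants are
 * illustrative stand-ins for XFS_INLINE_EXTS and XFS_LINEAR_EXTS, and the
 * empty-fork case (free everything) is left out; ex5_* names are hypothetical.
 */
#include <stddef.h>

#define EX5_INLINE_EXTS	2	/* extents that fit in the inline buffer */
#define EX5_LINEAR_EXTS	256	/* extents that fit in one direct buffer/page */

enum ex5_action { EX5_FULL, EX5_TO_DIRECT, EX5_PARTIAL, EX5_NONE };

static enum ex5_action ex5_compact_policy(size_t nextents, size_t nlists)
{
	if (nextents <= EX5_INLINE_EXTS)
		return EX5_FULL;		/* back to the inline buffer */
	if (nextents <= EX5_LINEAR_EXTS)
		return EX5_TO_DIRECT;		/* back to a single linear buffer */
	if (nextents < (nlists * EX5_LINEAR_EXTS) / 2)
		return EX5_PARTIAL;		/* merge neighbouring pages */
	return EX5_NONE;			/* at least 50% full: leave as is */
}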
1940
1941/*
1942 * Combine extents from neighboring extent pages.
1943 */
1944void
1945xfs_iext_irec_compact_pages(
1946 xfs_ifork_t *ifp) /* inode fork pointer */
1947{
1948 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
1949 int erp_idx = 0; /* indirection array index */
1950 int nlists; /* number of irec's (ex lists) */
1951
1952 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1953 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1954 while (erp_idx < nlists - 1) {
1955 erp = &ifp->if_u1.if_ext_irec[erp_idx];
1956 erp_next = erp + 1;
1957 if (erp_next->er_extcount <=
1958 (XFS_LINEAR_EXTS - erp->er_extcount)) {
1959 memcpy(&erp->er_extbuf[erp->er_extcount],
1960 erp_next->er_extbuf, erp_next->er_extcount *
1961 sizeof(xfs_bmbt_rec_t));
1962 erp->er_extcount += erp_next->er_extcount;
1963 /*
1964 * Free page before removing extent record
1965 * so er_extoffs don't get modified in
1966 * xfs_iext_irec_remove.
1967 */
1968 kmem_free(erp_next->er_extbuf);
1969 erp_next->er_extbuf = NULL;
1970 xfs_iext_irec_remove(ifp, erp_idx + 1);
1971 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1972 } else {
1973 erp_idx++;
1974 }
1975 }
1976}
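/*
 * The same greedy neighbour merge in miniature, assuming a flat array of
 * fixed-capacity pages: fold page i+1 into page i whenever the combined
 * record count still fits, otherwise move on.  Page freeing and er_extoff
 * maintenance are left out; ex6_* names are hypothetical.
 */
#include <stddef.h>
#include <string.h>

#define EX6_LINEAR_EXTS	256		/* illustrative per-page capacity */

struct ex6_page {
	int	rec[EX6_LINEAR_EXTS];	/* stand-in for xfs_bmbt_rec_t records */
	size_t	count;			/* records currently in this page */
};

/* Returns the new number of pages after merging. */
static size_t ex6_compact_pages(struct ex6_page *pages, size_t npages)
{
	size_t i = 0;

	while (i + 1 < npages) {
		struct ex6_page *cur = &pages[i];
		struct ex6_page *next = &pages[i + 1];

		if (next->count <= EX6_LINEAR_EXTS - cur->count) {
			memcpy(&cur->rec[cur->count], next->rec,
			       next->count * sizeof(next->rec[0]));
			cur->count += next->count;
			/* close the gap left by the now-empty page */
			memmove(next, next + 1,
				(npages - i - 2) * sizeof(*next));
			npages--;
		} else {
			i++;
		}
	}
	return npages;
}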
1977
1978/*
1979 * This is called to update the er_extoff field in the indirection
1980 * array when extents have been added or removed from one of the
1981 * extent lists. erp_idx contains the irec index to begin updating
1982 * at and ext_diff contains the number of extents that were added
1983 * or removed.
1984 */
1985void
1986xfs_iext_irec_update_extoffs(
1987 xfs_ifork_t *ifp, /* inode fork pointer */
1988 int erp_idx, /* irec index to update */
1989 int ext_diff) /* number of new extents */
1990{
1991 int i; /* loop counter */
1992	int		nlists;		/* number of irec's (ex lists) */
1993
1994 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
1995 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
1996 for (i = erp_idx; i < nlists; i++) {
1997 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
1998 }
1999}
2000
2001/*
2002 * Initialize an inode's copy-on-write fork.
2003 */
2004void
2005xfs_ifork_init_cow(
2006 struct xfs_inode *ip)
2007{
2008 if (ip->i_cowfp)
2009 return;
2010
2011 ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
2012 KM_SLEEP | KM_NOFS);
2013 ip->i_cowfp->if_flags = XFS_IFEXTENTS;
2014 ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
2015 ip->i_cnextents = 0;
2016}
2017
2018/*
2019 * Lookup the extent covering bno.
2020 *
2021 * If there is an extent covering bno, return true, store the expanded extent
2022 * structure in *gotp, and store the index of that extent in *idxp.
2023 * If there is no extent covering bno, but there is an extent after it (e.g.
2024 * it lies in a hole), return that extent in *gotp and its index in *idxp
2025 * instead.
2026 * If bno is beyond the last extent, return false and store the index after
2027 * the last valid index in *idxp.
2028 */
2029bool
2030xfs_iext_lookup_extent(
2031 struct xfs_inode *ip,
2032 struct xfs_ifork *ifp,
2033 xfs_fileoff_t bno,
2034 xfs_extnum_t *idxp,
2035 struct xfs_bmbt_irec *gotp)
2036{
2037 struct xfs_bmbt_rec_host *ep;
2038
2039 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
2040
2041 ep = xfs_iext_bno_to_ext(ifp, bno, idxp);
2042 if (!ep)
2043 return false;
2044 xfs_bmbt_get_all(ep, gotp);
2045 return true;
2046}
2047
2048/*
2049 * Return true if there is an extent at index idx, and return the expanded
2050 * extent structure at idx in that case. Else return false.
2051 */
2052bool
2053xfs_iext_get_extent(
2054 struct xfs_ifork *ifp,
2055 xfs_extnum_t idx,
2056 struct xfs_bmbt_irec *gotp)
2057{
2058 if (idx < 0 || idx >= xfs_iext_count(ifp))
2059 return false;
2060 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), gotp);
2061 return true;
2062}
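/*
 * A hedged usage sketch (not taken from the kernel tree) of the two helpers
 * above: walk every extent of a fork starting at file offset zero.  Locking,
 * error handling and the surrounding context are omitted, and 'ip'/'ifp' are
 * assumed to have been set up by the caller.
 */
static void example_walk_extents(
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp)
{
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;

	/* Find the extent covering offset 0, or the first extent after it. */
	if (!xfs_iext_lookup_extent(ip, ifp, 0, &idx, &got))
		return;				/* fork has no extents at all */
	do {
		/* each 'got' describes one contiguous mapping */
	} while (xfs_iext_get_extent(ifp, ++idx, &got));
}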