/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_rmap.h"

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

void
xfs_bmbt_disk_get_all(
	struct xfs_bmbt_rec	*rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	xfs_bmbt_rec_t	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	xfs_bmbt_rec_t	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64 - BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64 - BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64 - BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		((xfs_bmbt_rec_base_t)s->br_blockcount &
		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the firstblock, dfops, and flags values,
	 * since init cursor doesn't get them.
	 */
	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
	new->bc_private.b.dfops = cur->bc_private.b.dfops;
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
	ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

	src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_private.b.dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if successful activate
		 * the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.dfops->dop_low = true;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;

 error0:
	return error;
}

STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
			  be64_to_cpu(k2->bmbt.br_startoff);
}

static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return __this_address;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};

STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.firstblock = NULLFSBLOCK;
	cur->bc_private.b.dfops = NULL;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the owner that is passed in so that we can change owners before or after
 * we switch forks between inodes.  The operation that the caller is doing will
 * determine whether it needs to change owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}

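/*
 * Illustrative calling sketch for the two ownership-change modes described
 * above; the call sites shown here are hypothetical, not taken from this
 * file:
 *
 *	// transactional modification: supply a transaction, no buffer list
 *	error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, new_ino, NULL);
 *
 *	// recovery-based change: no transaction, collect buffers to write out
 *	LIST_HEAD(buffer_list);
 *	error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, new_ino,
 *				      &buffer_list);
 */
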
/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}