/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_rmap.h"

/*
 * Convert on-disk form of btree root to in-memory form.
 */
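/*
 * As implied by the addressing macros used below: the on-disk root
 * (xfs_bmdr_block_t) is a stripped-down header carrying only the level and
 * record count, while the incore root is a full long-format
 * struct xfs_btree_block.  Keys and pointers therefore have to be located
 * with the XFS_BMDR_* and XFS_BMBT_* address helpers and copied separately
 * rather than with a single wholesale memcpy.
 */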
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock and xfs_bmbt_get_blockcount.
 */
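/*
 * Packed record layout, as implied by the masks and shifts used here
 * (bit 63 is the MSB of each 64-bit word):
 *
 *	l0: extent flag (1 bit) | startoff (54 bits) | startblock bits 51-43
 *	l1: startblock bits 42-0 (43 bits)           | blockcount (21 bits)
 *
 * The extent flag marks an unwritten (preallocated) extent.
 */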
STATIC void
__xfs_bmbt_get_all(
	uint64_t		l0,
	uint64_t		l1,
	xfs_bmbt_irec_t		*s)
{
	int			ext_flag;
	xfs_exntst_t		st;

	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
	s->br_startoff = ((xfs_fileoff_t)l0 &
			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
			   (((xfs_fsblock_t)l1) >> 21);
	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
	/* This is xfs_extent_state() in-line */
	if (ext_flag) {
		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
		st = XFS_EXT_UNWRITTEN;
	} else
		st = XFS_EXT_NORM;
	s->br_state = st;
}

void
xfs_bmbt_get_all(
	xfs_bmbt_rec_host_t	*r,
	xfs_bmbt_irec_t		*s)
{
	__xfs_bmbt_get_all(r->l0, r->l1, s);
}

/*
 * Extract the blockcount field from an in memory bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_get_blockcount(
	xfs_bmbt_rec_host_t	*r)
{
	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}

/*
 * Extract the startblock field from an in memory bmap extent record.
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
	xfs_bmbt_rec_host_t	*r)
{
	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
	       (((xfs_fsblock_t)r->l1) >> 21);
}

/*
 * Extract the startoff field from an in memory bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_get_startoff(
	xfs_bmbt_rec_host_t	*r)
{
	return ((xfs_fileoff_t)r->l0 &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	xfs_bmbt_rec_t	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	xfs_bmbt_rec_t	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_set_all(
	struct xfs_bmbt_rec_host *r,
	struct xfs_bmbt_irec	 *s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		((xfs_bmbt_rec_base_t)s->br_startblock >> 43);
	r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		((xfs_bmbt_rec_base_t)s->br_blockcount &
		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}

/*
 * Set all the fields in an on-disk bmap extent record from the uncompressed
 * form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		((xfs_bmbt_rec_base_t)s->br_blockcount &
		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the firstblock, dfops, and flags values,
	 * since init cursor doesn't get them.
	 */
	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
	new->bc_private.b.dfops = cur->bc_private.b.dfops;
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
	ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

	src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_private.b.dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.dfops->dop_low = true;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

 error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
			xfs_bmbt_disk_get_startoff(&rec->bmbt) +
			xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
			  be64_to_cpu(k2->bmbt.br_startoff);
}

static bool
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level;

	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
			return false;
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return false;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return false;
	if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.l.bb_leftsib ||
	    (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
		return false;
	if (!block->bb_u.l.bb_rightsib ||
	    (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
		return false;

	return true;
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_bmbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_bmbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
};

STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.firstblock = NULLFSBLOCK;
	cur->bc_private.b.dfops = NULL;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}
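
/*
 * Callers pair this with xfs_btree_del_cursor(); a typical usage sketch,
 * mirroring xfs_bmbt_change_owner() below:
 *
 *	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 *	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */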

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree-format fork of the inode passed in. Change it to
 * the owner that is passed in so that we can change owners before or after we
 * switch forks between inodes. The operation that the caller is doing will
 * determine whether it needs to change the owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}