/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"

/*
 * Prototypes for internal functions.
 */

STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *);
STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);
STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *);
STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
		xfs_alloc_key_t *, xfs_btree_cur_t **, int *);

/*
 * Internal functions.
 */

/*
 * Single level of the xfs_alloc_delete record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int			/* error */
xfs_alloc_delrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level removing record from */
	int			*stat)	/* fail/done/go-on */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_agblock_t		bno;	/* btree block number */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* kp points here if block is level 0 */
	xfs_agblock_t		lbno;	/* left block's block number */
	xfs_buf_t		*lbp;	/* left block's buffer pointer */
	xfs_alloc_block_t	*left;	/* left btree block */
	xfs_alloc_key_t		*lkp=NULL;	/* left block key pointer */
	xfs_alloc_ptr_t		*lpp=NULL;	/* left block address pointer */
	int			lrecs=0;	/* number of records in left block */
	xfs_alloc_rec_t		*lrp;	/* left block record pointer */
	xfs_mount_t		*mp;	/* mount structure */
	int			ptr;	/* index in btree block for this rec */
	xfs_agblock_t		rbno;	/* right block's block number */
	xfs_buf_t		*rbp;	/* right block's buffer pointer */
	xfs_alloc_block_t	*right;	/* right btree block */
	xfs_alloc_key_t		*rkp;	/* right block key pointer */
	xfs_alloc_ptr_t		*rpp;	/* right block address pointer */
	int			rrecs=0;	/* number of records in right block */
	int			numrecs;
	xfs_alloc_rec_t		*rrp;	/* right block record pointer */
	xfs_btree_cur_t		*tcur;	/* temporary btree cursor */

	/*
	 * Get the index of the entry being deleted, check for nothing there.
	 */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	/*
	 * Get the buffer & block containing the record or key/ptr.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
#endif
	/*
	 * Fail if we're off the end of the block.
	 */
	numrecs = be16_to_cpu(block->bb_numrecs);
	if (ptr > numrecs) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_delrec);
	/*
	 * It's a nonleaf.  Excise the key and ptr being deleted, by
	 * sliding the entries past them down one.
	 * Log the changed areas of the block.
	 */
	if (level > 0) {
		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
#ifdef DEBUG
		for (i = ptr; i < numrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
				return error;
		}
#endif
		if (ptr < numrecs) {
			memmove(&lkp[ptr - 1], &lkp[ptr],
				(numrecs - ptr) * sizeof(*lkp));
			memmove(&lpp[ptr - 1], &lpp[ptr],
				(numrecs - ptr) * sizeof(*lpp));
			xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
			xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
		}
	}
	/*
	 * It's a leaf.  Excise the record being deleted, by sliding the
	 * entries past it down one.  Log the changed areas of the block.
	 */
	else {
		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
		if (ptr < numrecs) {
			memmove(&lrp[ptr - 1], &lrp[ptr],
				(numrecs - ptr) * sizeof(*lrp));
			xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
		}
		/*
		 * If it's the first record in the block, we'll need a key
		 * structure to pass up to the next level (updkey).
		 */
		if (ptr == 1) {
			key.ar_startblock = lrp->ar_startblock;
			key.ar_blockcount = lrp->ar_blockcount;
			lkp = &key;
		}
	}
	/*
	 * Decrement and log the number of entries in the block.
	 */
	numrecs--;
	block->bb_numrecs = cpu_to_be16(numrecs);
	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
	/*
	 * See if the longest free extent in the allocation group was
	 * changed by this operation.  True if it's the by-size btree, and
	 * this is the leaf level, and there is no right sibling block,
	 * and this was the last record.
	 */
	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	mp = cur->bc_mp;

	if (level == 0 &&
	    cur->bc_btnum == XFS_BTNUM_CNT &&
	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
	    ptr > numrecs) {
		ASSERT(ptr == numrecs + 1);
		/*
		 * There are still records in the block.  Grab the size
		 * from the last one.
		 */
		if (numrecs) {
			rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
			agf->agf_longest = rrp->ar_blockcount;
		}
		/*
		 * No free extents left.
		 */
		else
			agf->agf_longest = 0;
		mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
			be32_to_cpu(agf->agf_longest);
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_LONGEST);
	}
	/*
	 * Is this the root level?  If so, we're almost done.
	 */
	if (level == cur->bc_nlevels - 1) {
		/*
		 * If this is the root level,
		 * and there's only one entry left,
		 * and it's NOT the leaf level,
		 * then we can get rid of this level.
		 */
		if (numrecs == 1 && level > 0) {
			/*
			 * lpp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
			agf->agf_roots[cur->bc_btnum] = *lpp;
			be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
			mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
			/*
			 * Put this buffer/block on the ag's freelist.
			 */
			error = xfs_alloc_put_freelist(cur->bc_tp,
					cur->bc_private.a.agbp, NULL, bno, 1);
			if (error)
				return error;
			/*
			 * Since blocks move to the free list without the
			 * coordination used in xfs_bmap_finish, we can't allow
			 * block to be available for reallocation and
			 * non-transaction writing (user data) until we know
			 * that the transaction that moved it to the free list
			 * is permanently on disk.  We track the blocks by
			 * declaring these blocks as "busy"; the busy list is
			 * maintained on a per-ag basis and each transaction
			 * records which entries should be removed when the
			 * iclog commits to disk.  If a busy block is
			 * allocated, the iclog is pushed up to the LSN
			 * that freed the block.
			 */
			xfs_alloc_mark_busy(cur->bc_tp,
				be32_to_cpu(agf->agf_seqno), bno, 1);

			xfs_trans_agbtree_delta(cur->bc_tp, -1);
			xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
				XFS_AGF_ROOTS | XFS_AGF_LEVELS);
			/*
			 * Update the cursor so there's one fewer level.
			 */
			xfs_btree_setbuf(cur, level, NULL);
			cur->bc_nlevels--;
		} else if (level > 0 &&
			   (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1)))
		return error;
	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	rbno = be32_to_cpu(block->bb_rightsib);
	lbno = be32_to_cpu(block->bb_leftsib);
	bno = NULLAGBLOCK;
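	/*
	 * bno is filled in below from a sibling's back-pointer, i.e. with
	 * this block's own AG block number; the later ASSERT(bno !=
	 * NULLAGBLOCK) checks that at least one sibling existed to set it.
	 */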
	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
		return error;
	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (rbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_increment(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Grab a pointer to the block.
		 */
		rbp = tcur->bc_bufs[level];
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(right->bb_leftsib);
		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (be16_to_cpu(right->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_alloc_lshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
					XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						XFS_BTREE_NOERROR);
				if (level > 0 &&
				    (error = xfs_btree_decrement(cur, level,
						&i)))
					return error;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = be16_to_cpu(right->bb_numrecs);
		if (lbno != NULLAGBLOCK) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if ((error = xfs_btree_decrement(tcur, level, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		}
	}
	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (lbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_decrement(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_btree_firstrec(tcur, level);
		/*
		 * Grab a pointer to the block.
		 */
		lbp = tcur->bc_bufs[level];
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(left->bb_rightsib);
		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (be16_to_cpu(left->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_alloc_rshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
					XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						XFS_BTREE_NOERROR);
				if (level == 0)
					cur->bc_ptrs[0]++;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference.
		 */
		lrecs = be16_to_cpu(left->bb_numrecs);
	}
	/*
	 * Delete the temp cursor, we're done with it.
	 */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	/*
	 * If here, we need to do a join to keep the tree balanced.
	 */
	ASSERT(bno != NULLAGBLOCK);
	/*
	 * See if we can join with the left neighbor block.
	 */
	if (lbno != NULLAGBLOCK &&
	    lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rbno = bno;
		right = block;
		rrecs = be16_to_cpu(right->bb_numrecs);
		rbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, lbno, 0, &lbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
		lrecs = be16_to_cpu(left->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			return error;
	}
	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	else if (rbno != NULLAGBLOCK &&
		 rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lbno = bno;
		left = block;
		lrecs = be16_to_cpu(left->bb_numrecs);
		lbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, rbno, 0, &rbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
		rrecs = be16_to_cpu(right->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			return error;
	}
	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	else {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	if (level > 0) {
		/*
		 * It's a non-leaf.  Move keys and pointers.
		 */
		lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
#ifdef DEBUG
		for (i = 0; i < rrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
				return error;
		}
#endif
		memcpy(lkp, rkp, rrecs * sizeof(*lkp));
		memcpy(lpp, rpp, rrecs * sizeof(*lpp));
		xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/*
		 * It's a leaf.  Move records.
		 */
		lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
		memcpy(lrp, rrp, rrecs * sizeof(*lrp));
		xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}
	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		xfs_btree_setbuf(cur, level, lbp);
		cur->bc_ptrs[level] += lrecs;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if (level + 1 < cur->bc_nlevels &&
		 (error = xfs_btree_increment(cur, level + 1, &i)))
		return error;
	/*
	 * Fix up the number of records in the surviving block.
	 */
	lrecs += rrecs;
	left->bb_numrecs = cpu_to_be16(lrecs);
	/*
	 * Fix up the right block pointer in the surviving block, and log it.
	 */
	left->bb_rightsib = right->bb_rightsib;
	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
	/*
	 * If there is a right sibling now, make it point to the
	 * remaining block.
	 */
	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
		xfs_alloc_block_t	*rrblock;
		xfs_buf_t		*rrbp;

		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
				&rrbp, XFS_ALLOC_BTREE_REF)))
			return error;
		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
			return error;
		rrblock->bb_leftsib = cpu_to_be32(lbno);
		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
	}
	/*
	 * Free the deleting block by putting it on the freelist.
	 */
	error = xfs_alloc_put_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, NULL, rbno, 1);
	if (error)
		return error;
	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a
	 * per-ag basis and each transaction records which entries
	 * should be removed when the iclog commits to disk.  If a
	 * busy block is allocated, the iclog is pushed up to the
	 * LSN that freed the block.
	 */
	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	/*
	 * Adjust the current level's cursor so that we're left referring
	 * to the right node, after we're done.
	 * If this leaves the ptr value 0 our caller will fix it up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;
	/*
	 * Return value means the next level up has something to do.
	 */
	*stat = 2;
	return 0;

error0:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int			/* error */
xfs_alloc_insrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	xfs_agblock_t		*bnop,	/* i/o: block number inserted */
	xfs_alloc_rec_t		*recp,	/* i/o: record data inserted */
	xfs_btree_cur_t		**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* output: success/failure */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* key value being inserted */
	xfs_alloc_key_t		*kp;	/* pointer to btree keys */
	xfs_agblock_t		nbno;	/* block number of allocated block */
	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
	xfs_alloc_key_t		nkey;	/* new key value, from split */
	xfs_alloc_rec_t		nrec;	/* new record value, for caller */
	int			numrecs;
	int			optr;	/* old ptr value */
	xfs_alloc_ptr_t		*pp;	/* pointer to btree addresses */
	int			ptr;	/* index in btree block for this rec */
	xfs_alloc_rec_t		*rp;	/* pointer to btree records */

	ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);

	/*
	 * GCC doesn't understand the (arguably complex) control flow in
	 * this function and complains about uninitialized structure fields
	 * without this.
	 */
	memset(&nrec, 0, sizeof(nrec));

	/*
	 * If we made it to the root level, allocate a new root block
	 * and we're done.
	 */
	if (level >= cur->bc_nlevels) {
		XFS_STATS_INC(xs_abt_insrec);
		if ((error = xfs_alloc_newroot(cur, &i)))
			return error;
		*bnop = NULLAGBLOCK;
		*stat = i;
		return 0;
	}
	/*
	 * Make a key out of the record data to be inserted, and save it.
	 */
	key.ar_startblock = recp->ar_startblock;
	key.ar_blockcount = recp->ar_blockcount;
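	/*
	 * optr remembers the insertion index for the parent-key update at
	 * the bottom of this function; ptr itself may be recomputed if the
	 * block has to be shifted or split to make room.
	 */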
	optr = ptr = cur->bc_ptrs[level];
	/*
	 * If we're off the left edge, return failure.
	 */
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_insrec);
	/*
	 * Get pointers to the btree buffer and block.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	numrecs = be16_to_cpu(block->bb_numrecs);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
	/*
	 * Check that the new entry is being inserted in the right place.
	 */
	if (ptr <= numrecs) {
		if (level == 0) {
			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
		} else {
			kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
			xfs_btree_check_key(cur->bc_btnum, &key, kp);
		}
	}
#endif
	nbno = NULLAGBLOCK;
	ncur = NULL;
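	/*
	 * nbno stays NULLAGBLOCK and ncur stays NULL unless the block is
	 * split below; if a split happens they are handed back to the
	 * caller so it can insert the new block's key at the next level up.
	 */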
	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * First, try shifting an entry to the right neighbor.
		 */
		if ((error = xfs_alloc_rshift(cur, level, &i)))
			return error;
		if (i) {
			/* nothing */
		}
		/*
		 * Next, try shifting an entry to the left neighbor.
		 */
		else {
			if ((error = xfs_alloc_lshift(cur, level, &i)))
				return error;
			if (i)
				optr = ptr = cur->bc_ptrs[level];
			else {
				/*
				 * Next, try splitting the current block in
				 * half. If this works we have to re-set our
				 * variables because we could be in a
				 * different block now.
				 */
				if ((error = xfs_alloc_split(cur, level, &nbno,
						&nkey, &ncur, &i)))
					return error;
				if (i) {
					bp = cur->bc_bufs[level];
					block = XFS_BUF_TO_ALLOC_BLOCK(bp);
#ifdef DEBUG
					if ((error =
						xfs_btree_check_sblock(cur,
							block, level, bp)))
						return error;
#endif
					ptr = cur->bc_ptrs[level];
					nrec.ar_startblock = nkey.ar_startblock;
					nrec.ar_blockcount = nkey.ar_blockcount;
				}
				/*
				 * Otherwise the insert fails.
				 */
				else {
					*stat = 0;
					return 0;
				}
			}
		}
	}
	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	numrecs = be16_to_cpu(block->bb_numrecs);
	if (level > 0) {
		/*
		 * It's a non-leaf entry.  Make a hole for the new data
		 * in the key and ptr regions of the block.
		 */
		kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
		pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
#ifdef DEBUG
		for (i = numrecs; i >= ptr; i--) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
				return error;
		}
#endif
		memmove(&kp[ptr], &kp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*kp));
		memmove(&pp[ptr], &pp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*pp));
#ifdef DEBUG
		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
			return error;
#endif
		/*
		 * Now stuff the new data in, bump numrecs and log the new data.
		 */
		kp[ptr - 1] = key;
		pp[ptr - 1] = cpu_to_be32(*bnop);
		numrecs++;
		block->bb_numrecs = cpu_to_be16(numrecs);
		xfs_alloc_log_keys(cur, bp, ptr, numrecs);
		xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs)
			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
				kp + ptr);
#endif
	} else {
		/*
		 * It's a leaf entry.  Make a hole for the new record.
		 */
		rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
		memmove(&rp[ptr], &rp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*rp));
		/*
		 * Now stuff the new record in, bump numrecs
		 * and log the new data.
		 */
		rp[ptr - 1] = *recp;
		numrecs++;
		block->bb_numrecs = cpu_to_be16(numrecs);
		xfs_alloc_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs)
			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
				rp + ptr);
#endif
	}
	/*
	 * Log the new number of records in the btree header.
	 */
	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
	/*
	 * If we inserted at the start of a block, update the parents' keys.
	 */
	if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1)))
		return error;
	/*
	 * Look to see if the longest extent in the allocation group
	 * needs to be updated.
	 */

	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	if (level == 0 &&
	    cur->bc_btnum == XFS_BTNUM_CNT &&
	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
	    be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
		/*
		 * If this is a leaf in the by-size btree and there
		 * is no right sibling block and this block is bigger
		 * than the previous longest block, update it.
		 */
		agf->agf_longest = recp->ar_blockcount;
		cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
			= be32_to_cpu(recp->ar_blockcount);
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_LONGEST);
	}
	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*bnop = nbno;
	if (nbno != NULLAGBLOCK) {
		*recp = nrec;
		*curp = ncur;
	}
	*stat = 1;
	return 0;
}

/*
 * Log header fields from a btree block.
 */
STATIC void
xfs_alloc_log_block(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	offsets[] = {	/* table of offsets */
		offsetof(xfs_alloc_block_t, bb_magic),
		offsetof(xfs_alloc_block_t, bb_level),
		offsetof(xfs_alloc_block_t, bb_numrecs),
		offsetof(xfs_alloc_block_t, bb_leftsib),
		offsetof(xfs_alloc_block_t, bb_rightsib),
		sizeof(xfs_alloc_block_t)
	};

	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Log keys from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_keys(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			kfirst,	/* index of first key to log */
	int			klast)	/* index of last key to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	xfs_alloc_key_t		*kp;	/* key pointer in btree block */
	int			last;	/* last byte offset logged */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
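	/*
	 * Key indices are 1-based in this code, so &kp[kfirst - 1] is the
	 * first key being logged; the byte range handed to the transaction
	 * runs from that key through the last byte of key klast.
	 */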
	first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_ptrs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			pfirst,	/* index of first pointer to log */
	int			plast)	/* index of last pointer to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_ptr_t		*pp;	/* block-pointer pointer in btree blk */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
	first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log records from a btree block (leaf).
 */
STATIC void
xfs_alloc_log_recs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			rfirst,	/* index of first record to log */
	int			rlast)	/* index of last record to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_rec_t		*rp;	/* record pointer for btree block */


	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
#ifdef DEBUG
	{
		xfs_agf_t	*agf;
		xfs_alloc_rec_t	*p;

		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
		for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
			ASSERT(be32_to_cpu(p->ar_startblock) +
				be32_to_cpu(p->ar_blockcount) <=
				be32_to_cpu(agf->agf_length));
	}
#endif
	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 */
STATIC int			/* error */
xfs_alloc_lshift(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level to shift record on */
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
#ifdef DEBUG
	int			i;	/* loop index */
#endif
	xfs_alloc_key_t		key;	/* key value for leaf level upward */
	xfs_buf_t		*lbp;	/* buffer for left neighbor block */
	xfs_alloc_block_t	*left;	/* left neighbor btree block */
	int			nrec;	/* new number of left block entries */
	xfs_buf_t		*rbp;	/* buffer for right (current) block */
	xfs_alloc_block_t	*right;	/* right (current) btree block */
	xfs_alloc_key_t		*rkp=NULL;	/* key pointer for right block */
	xfs_alloc_ptr_t		*rpp=NULL;	/* address pointer for right block */
	xfs_alloc_rec_t		*rrp=NULL;	/* record pointer for right block */

	/*
	 * Set up variables for this block as "right".
	 */
	rbp = cur->bc_bufs[level];
	right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
		return error;
#endif
	/*
	 * If we've got no left sibling then we can't shift an entry left.
	 */
	if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}
	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1) {
		*stat = 0;
		return 0;
	}
	/*
	 * Set up the left neighbor as "left".
	 */
	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
			0, &lbp, XFS_ALLOC_BTREE_REF)))
		return error;
	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
		return error;
	/*
	 * If it's full, it can't take another entry.
	 */
	if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		*stat = 0;
		return 0;
	}
	nrec = be16_to_cpu(left->bb_numrecs) + 1;
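	/*
	 * nrec is the (1-based) slot in the left block that the shifted
	 * entry will occupy, i.e. one past its current last entry.
	 */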
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 /*
1006 * If non-leaf, copy a key and a ptr to the left block.
1007 */
1008 if (level > 0) {
1009 xfs_alloc_key_t *lkp; /* key pointer for left block */
1010 xfs_alloc_ptr_t *lpp; /* address pointer for left block */
1011
1012 lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur);
1013 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
1014 *lkp = *rkp;
1015 xfs_alloc_log_keys(cur, lbp, nrec, nrec);
1016 lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur);
1017 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
1018#ifdef DEBUG
Christoph Hellwig16259e72005-11-02 15:11:25 +11001019 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 return error;
1021#endif
Christoph Hellwigc38e5e82006-09-28 10:57:17 +10001022 *lpp = *rpp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 xfs_alloc_log_ptrs(cur, lbp, nrec, nrec);
1024 xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
1025 }
1026 /*
1027 * If leaf, copy a record to the left block.
1028 */
1029 else {
1030 xfs_alloc_rec_t *lrp; /* record pointer for left block */
1031
1032 lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur);
1033 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
1034 *lrp = *rrp;
1035 xfs_alloc_log_recs(cur, lbp, nrec, nrec);
1036 xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
1037 }
1038 /*
1039 * Bump and log left's numrecs, decrement and log right's numrecs.
1040 */
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001041 be16_add_cpu(&left->bb_numrecs, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001043 be16_add_cpu(&right->bb_numrecs, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
1045 /*
1046 * Slide the contents of right down one entry.
1047 */
1048 if (level > 0) {
1049#ifdef DEBUG
Christoph Hellwig16259e72005-11-02 15:11:25 +11001050 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1051 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 level)))
1053 return error;
1054 }
1055#endif
Christoph Hellwig16259e72005-11-02 15:11:25 +11001056 memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1057 memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1058 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1059 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 } else {
Christoph Hellwig16259e72005-11-02 15:11:25 +11001061 memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1062 xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1063 key.ar_startblock = rrp->ar_startblock;
1064 key.ar_blockcount = rrp->ar_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 rkp = &key;
1066 }
1067 /*
1068 * Update the parent key values of right.
1069 */
Christoph Hellwig38bb7422008-10-30 16:56:22 +11001070 if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 return error;
1072 /*
1073 * Slide the cursor value left one.
1074 */
1075 cur->bc_ptrs[level]--;
1076 *stat = 1;
1077 return 0;
1078}
1079
1080/*
1081 * Allocate a new root block, fill it in.
1082 */
1083STATIC int /* error */
1084xfs_alloc_newroot(
1085 xfs_btree_cur_t *cur, /* btree cursor */
1086 int *stat) /* success/failure */
1087{
1088 int error; /* error return value */
1089 xfs_agblock_t lbno; /* left block number */
1090 xfs_buf_t *lbp; /* left btree buffer */
1091 xfs_alloc_block_t *left; /* left btree block */
1092 xfs_mount_t *mp; /* mount structure */
1093 xfs_agblock_t nbno; /* new block number */
1094 xfs_buf_t *nbp; /* new (root) buffer */
1095 xfs_alloc_block_t *new; /* new (root) btree block */
1096 int nptr; /* new value for key index, 1 or 2 */
1097 xfs_agblock_t rbno; /* right block number */
1098 xfs_buf_t *rbp; /* right btree buffer */
1099 xfs_alloc_block_t *right; /* right btree block */
1100
1101 mp = cur->bc_mp;
1102
1103 ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
1104 /*
1105 * Get a buffer from the freelist blocks, for the new root.
1106 */
David Chinner92821e22007-05-24 15:26:31 +10001107 error = xfs_alloc_get_freelist(cur->bc_tp,
1108 cur->bc_private.a.agbp, &nbno, 1);
1109 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 return error;
1111 /*
1112 * None available, we fail.
1113 */
1114 if (nbno == NULLAGBLOCK) {
1115 *stat = 0;
1116 return 0;
1117 }
1118 xfs_trans_agbtree_delta(cur->bc_tp, 1);
1119 nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
1120 0);
1121 new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
1122 /*
1123 * Set the root data in the a.g. freespace structure.
1124 */
1125 {
1126 xfs_agf_t *agf; /* a.g. freespace header */
1127 xfs_agnumber_t seqno;
1128
1129 agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001130 agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001131 be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001132 seqno = be32_to_cpu(agf->agf_seqno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
1134 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
1135 XFS_AGF_ROOTS | XFS_AGF_LEVELS);
1136 }
1137 /*
1138 * At the previous root level there are now two blocks: the old
1139 * root, and the new block generated when it was split.
1140 * We don't know which one the cursor is pointing at, so we
1141 * set up variables "left" and "right" for each case.
1142 */
1143 lbp = cur->bc_bufs[cur->bc_nlevels - 1];
1144 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
1145#ifdef DEBUG
1146 if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
1147 return error;
1148#endif
Christoph Hellwig16259e72005-11-02 15:11:25 +11001149 if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 /*
1151 * Our block is left, pick up the right block.
1152 */
1153 lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
Christoph Hellwig16259e72005-11-02 15:11:25 +11001154 rbno = be32_to_cpu(left->bb_rightsib);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
1156 cur->bc_private.a.agno, rbno, 0, &rbp,
1157 XFS_ALLOC_BTREE_REF)))
1158 return error;
1159 right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
1160 if ((error = xfs_btree_check_sblock(cur, right,
1161 cur->bc_nlevels - 1, rbp)))
1162 return error;
1163 nptr = 1;
1164 } else {
1165 /*
1166 * Our block is right, pick up the left block.
1167 */
1168 rbp = lbp;
1169 right = left;
1170 rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
Christoph Hellwig16259e72005-11-02 15:11:25 +11001171 lbno = be32_to_cpu(right->bb_leftsib);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
1173 cur->bc_private.a.agno, lbno, 0, &lbp,
1174 XFS_ALLOC_BTREE_REF)))
1175 return error;
1176 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
1177 if ((error = xfs_btree_check_sblock(cur, left,
1178 cur->bc_nlevels - 1, lbp)))
1179 return error;
1180 nptr = 2;
1181 }
1182 /*
1183 * Fill in the new block's btree header and log it.
1184 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001185 new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
1186 new->bb_level = cpu_to_be16(cur->bc_nlevels);
1187 new->bb_numrecs = cpu_to_be16(2);
1188 new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
1189 new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
1191 ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
1192 /*
1193 * Fill in the key data in the new root.
1194 */
1195 {
1196 xfs_alloc_key_t *kp; /* btree key pointer */
1197
1198 kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001199 if (be16_to_cpu(left->bb_level) > 0) {
Christoph Hellwigc38e5e82006-09-28 10:57:17 +10001200 kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur);
1201 kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 } else {
1203 xfs_alloc_rec_t *rp; /* btree record pointer */
1204
1205 rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001206 kp[0].ar_startblock = rp->ar_startblock;
1207 kp[0].ar_blockcount = rp->ar_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001209 kp[1].ar_startblock = rp->ar_startblock;
1210 kp[1].ar_blockcount = rp->ar_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 }
1212 }
1213 xfs_alloc_log_keys(cur, nbp, 1, 2);
1214 /*
1215 * Fill in the pointer data in the new root.
1216 */
1217 {
1218 xfs_alloc_ptr_t *pp; /* btree address pointer */
1219
1220 pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001221 pp[0] = cpu_to_be32(lbno);
1222 pp[1] = cpu_to_be32(rbno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 }
1224 xfs_alloc_log_ptrs(cur, nbp, 1, 2);
1225 /*
1226 * Fix up the cursor.
1227 */
1228 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
1229 cur->bc_ptrs[cur->bc_nlevels] = nptr;
1230 cur->bc_nlevels++;
1231 *stat = 1;
1232 return 0;
1233}
1234
1235/*
1236 * Move 1 record right from cur/level if possible.
1237 * Update cur to reflect the new path.
1238 */
1239STATIC int /* error */
1240xfs_alloc_rshift(
1241 xfs_btree_cur_t *cur, /* btree cursor */
1242 int level, /* level to shift record on */
1243 int *stat) /* success/failure */
1244{
1245 int error; /* error return value */
1246 int i; /* loop index */
1247 xfs_alloc_key_t key; /* key value for leaf level upward */
1248 xfs_buf_t *lbp; /* buffer for left (current) block */
1249 xfs_alloc_block_t *left; /* left (current) btree block */
1250 xfs_buf_t *rbp; /* buffer for right neighbor block */
1251 xfs_alloc_block_t *right; /* right neighbor btree block */
1252 xfs_alloc_key_t *rkp; /* key pointer for right block */
1253 xfs_btree_cur_t *tcur; /* temporary cursor */
1254
1255 /*
1256 * Set up variables for this block as "left".
1257 */
1258 lbp = cur->bc_bufs[level];
1259 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
1260#ifdef DEBUG
1261 if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
1262 return error;
1263#endif
1264 /*
1265 * If we've got no right sibling then we can't shift an entry right.
1266 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001267 if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 *stat = 0;
1269 return 0;
1270 }
1271 /*
1272 * If the cursor entry is the one that would be moved, don't
1273 * do it... it's too complicated.
1274 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001275 if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 *stat = 0;
1277 return 0;
1278 }
1279 /*
1280 * Set up the right neighbor as "right".
1281 */
1282 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
Christoph Hellwig16259e72005-11-02 15:11:25 +11001283 cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
1284 0, &rbp, XFS_ALLOC_BTREE_REF)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 return error;
1286 right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
1287 if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
1288 return error;
1289 /*
1290 * If it's full, it can't take another entry.
1291 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001292 if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 *stat = 0;
1294 return 0;
1295 }
1296 /*
1297 * Make a hole at the start of the right neighbor block, then
1298 * copy the last left block entry to the hole.
1299 */
1300 if (level > 0) {
1301 xfs_alloc_key_t *lkp; /* key pointer for left block */
1302 xfs_alloc_ptr_t *lpp; /* address pointer for left block */
1303 xfs_alloc_ptr_t *rpp; /* address pointer for right block */
1304
Christoph Hellwig16259e72005-11-02 15:11:25 +11001305 lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1306 lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
1308 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
1309#ifdef DEBUG
Christoph Hellwig16259e72005-11-02 15:11:25 +11001310 for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
1311 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 return error;
1313 }
1314#endif
Christoph Hellwig16259e72005-11-02 15:11:25 +11001315 memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1316 memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317#ifdef DEBUG
Christoph Hellwig16259e72005-11-02 15:11:25 +11001318 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 return error;
1320#endif
Christoph Hellwigc38e5e82006-09-28 10:57:17 +10001321 *rkp = *lkp;
1322 *rpp = *lpp;
Christoph Hellwig16259e72005-11-02 15:11:25 +11001323 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1324 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
1326 } else {
1327 xfs_alloc_rec_t *lrp; /* record pointer for left block */
1328 xfs_alloc_rec_t *rrp; /* record pointer for right block */
1329
Christoph Hellwig16259e72005-11-02 15:11:25 +11001330 lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001332 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 *rrp = *lrp;
Christoph Hellwig16259e72005-11-02 15:11:25 +11001334 xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1335 key.ar_startblock = rrp->ar_startblock;
1336 key.ar_blockcount = rrp->ar_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 rkp = &key;
1338 xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
1339 }
1340 /*
1341 * Decrement and log left's numrecs, bump and log right's numrecs.
1342 */
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001343 be16_add_cpu(&left->bb_numrecs, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001345 be16_add_cpu(&right->bb_numrecs, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
1347 /*
1348 * Using a temporary cursor, update the parent key values of the
1349 * block on the right.
1350 */
1351 if ((error = xfs_btree_dup_cursor(cur, &tcur)))
1352 return error;
1353 i = xfs_btree_lastrec(tcur, level);
1354 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
Christoph Hellwig637aa502008-10-30 16:55:45 +11001355 if ((error = xfs_btree_increment(tcur, level, &i)) ||
Christoph Hellwig38bb7422008-10-30 16:56:22 +11001356 (error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 goto error0;
1358 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1359 *stat = 1;
1360 return 0;
1361error0:
1362 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1363 return error;
1364}
1365
1366/*
1367 * Split cur/level block in half.
1368 * Return new block number and its first record (to be inserted into parent).
1369 */
1370STATIC int /* error */
1371xfs_alloc_split(
1372 xfs_btree_cur_t *cur, /* btree cursor */
1373 int level, /* level to split */
1374 xfs_agblock_t *bnop, /* output: block number allocated */
1375 xfs_alloc_key_t *keyp, /* output: first key of new block */
1376 xfs_btree_cur_t **curp, /* output: new cursor */
1377 int *stat) /* success/failure */
1378{
1379 int error; /* error return value */
1380 int i; /* loop index/record number */
1381 xfs_agblock_t lbno; /* left (current) block number */
1382 xfs_buf_t *lbp; /* buffer for left block */
1383 xfs_alloc_block_t *left; /* left (current) btree block */
1384 xfs_agblock_t rbno; /* right (new) block number */
1385 xfs_buf_t *rbp; /* buffer for right block */
1386 xfs_alloc_block_t *right; /* right (new) btree block */
1387
1388 /*
1389 * Allocate the new block from the freelist.
1390 * If we can't do it, we're toast. Give up.
1391 */
David Chinner92821e22007-05-24 15:26:31 +10001392 error = xfs_alloc_get_freelist(cur->bc_tp,
1393 cur->bc_private.a.agbp, &rbno, 1);
1394 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 return error;
1396 if (rbno == NULLAGBLOCK) {
1397 *stat = 0;
1398 return 0;
1399 }
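	/*
	 * (As inferred from the callers: xfs_alloc_fix_freelist() is
	 * expected to have stocked the AGFL before btree updates, so an
	 * empty freelist here should be rare; *stat == 0 just propagates
	 * "no block available" up through xfs_alloc_insrec() and
	 * xfs_alloc_insert() without raising an error.)
	 */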
1400 xfs_trans_agbtree_delta(cur->bc_tp, 1);
1401 rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno,
1402 rbno, 0);
1403 /*
1404 * Set up the new block as "right".
1405 */
1406 right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
1407 /*
1408 * "Left" is the current (according to the cursor) block.
1409 */
1410 lbp = cur->bc_bufs[level];
1411 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
1412#ifdef DEBUG
1413 if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
1414 return error;
1415#endif
1416 /*
1417 * Fill in the btree header for the new block.
1418 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001419 right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
1420 right->bb_level = left->bb_level;
1421 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 /*
 1423 * Make sure that if there's an odd number of entries now, each
 1424 * new block will have the same number of entries.
1425 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001426 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1427 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001428 be16_add_cpu(&right->bb_numrecs, 1);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001429 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
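	/*
	 * Worked example: with 9 records in left, right initially gets
	 * 9/2 = 4.  If the insert position will stay in the left block
	 * (ptr <= 5), right is bumped to 5 and i = 9 - 5 + 1 = 5, so
	 * records 5..9 move right, left keeps 1..4, and the pending
	 * insert brings left back to 5.  Otherwise right stays at 4,
	 * i = 6, records 6..9 move, and the insert lands in right.
	 */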
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 /*
1431 * For non-leaf blocks, copy keys and addresses over to the new block.
1432 */
1433 if (level > 0) {
1434 xfs_alloc_key_t *lkp; /* left btree key pointer */
1435 xfs_alloc_ptr_t *lpp; /* left btree address pointer */
1436 xfs_alloc_key_t *rkp; /* right btree key pointer */
1437 xfs_alloc_ptr_t *rpp; /* right btree address pointer */
1438
1439 lkp = XFS_ALLOC_KEY_ADDR(left, i, cur);
1440 lpp = XFS_ALLOC_PTR_ADDR(left, i, cur);
1441 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
1442 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
1443#ifdef DEBUG
Christoph Hellwig16259e72005-11-02 15:11:25 +11001444 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1445 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 return error;
1447 }
1448#endif
Christoph Hellwig16259e72005-11-02 15:11:25 +11001449 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1450 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1451 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1452 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 *keyp = *rkp;
1454 }
1455 /*
1456 * For leaf blocks, copy records over to the new block.
1457 */
1458 else {
1459 xfs_alloc_rec_t *lrp; /* left btree record pointer */
1460 xfs_alloc_rec_t *rrp; /* right btree record pointer */
1461
1462 lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
1463 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001464 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1465 xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1466 keyp->ar_startblock = rrp->ar_startblock;
1467 keyp->ar_blockcount = rrp->ar_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 }
1469 /*
1470 * Find the left block number by looking in the buffer.
1471 * Adjust numrecs, sibling pointers.
1472 */
1473 lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001474 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
Christoph Hellwig16259e72005-11-02 15:11:25 +11001475 right->bb_rightsib = left->bb_rightsib;
1476 left->bb_rightsib = cpu_to_be32(rbno);
1477 right->bb_leftsib = cpu_to_be32(lbno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
1479 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
1480 /*
1481 * If there's a block to the new block's right, make that block
1482 * point back to right instead of to left.
1483 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001484 if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 xfs_alloc_block_t *rrblock; /* rr btree block */
1486 xfs_buf_t *rrbp; /* buffer for rrblock */
1487
1488 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
Christoph Hellwig16259e72005-11-02 15:11:25 +11001489 cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 &rrbp, XFS_ALLOC_BTREE_REF)))
1491 return error;
1492 rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
1493 if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
1494 return error;
Christoph Hellwig16259e72005-11-02 15:11:25 +11001495 rrblock->bb_leftsib = cpu_to_be32(rbno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
1497 }
1498 /*
1499 * If the cursor is really in the right block, move it there.
1500 * If it's just pointing past the last entry in left, then we'll
1501 * insert there, so don't change anything in that case.
1502 */
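	/*
	 * E.g. 9 records split 5/4: a cursor at entry 7 is beyond left's
	 * remaining 5 entries, so it is pointed at the right buffer and
	 * becomes entry 7 - 5 = 2 there.
	 */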
Christoph Hellwig16259e72005-11-02 15:11:25 +11001503 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 xfs_btree_setbuf(cur, level, rbp);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001505 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 }
1507 /*
1508 * If there are more levels, we'll need another cursor which refers to
1509 * the right block, no matter where this cursor was.
1510 */
1511 if (level + 1 < cur->bc_nlevels) {
1512 if ((error = xfs_btree_dup_cursor(cur, curp)))
1513 return error;
1514 (*curp)->bc_ptrs[level + 1]++;
1515 }
1516 *bnop = rbno;
1517 *stat = 1;
1518 return 0;
1519}
1520
1521/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 * Externally visible routines.
1523 */
1524
1525/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 * Delete the record pointed to by cur.
1527 * The cursor refers to the place where the record was (could be inserted)
1528 * when the operation returns.
1529 */
1530int /* error */
1531xfs_alloc_delete(
1532 xfs_btree_cur_t *cur, /* btree cursor */
1533 int *stat) /* success/failure */
1534{
1535 int error; /* error return value */
1536 int i; /* result code */
1537 int level; /* btree level */
1538
1539 /*
1540 * Go up the tree, starting at leaf level.
1541 * If 2 is returned then a join was done; go to the next level.
1542 * Otherwise we are done.
1543 */
1544 for (level = 0, i = 2; i == 2; level++) {
1545 if ((error = xfs_alloc_delrec(cur, level, &i)))
1546 return error;
1547 }
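	/*
	 * A join at some level can leave that level's cursor slot at 0,
	 * i.e. hanging off the left edge of its block; stepping the
	 * cursor back once repositions it on a real entry (presumably
	 * why only the first such level needs fixing up here).
	 */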
1548 if (i == 0) {
1549 for (level = 1; level < cur->bc_nlevels; level++) {
1550 if (cur->bc_ptrs[level] == 0) {
Christoph Hellwig8df4da42008-10-30 16:55:58 +11001551 if ((error = xfs_btree_decrement(cur, level, &i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 return error;
1553 break;
1554 }
1555 }
1556 }
1557 *stat = i;
1558 return 0;
1559}
1560
1561/*
1562 * Get the data from the pointed-to record.
1563 */
1564int /* error */
1565xfs_alloc_get_rec(
1566 xfs_btree_cur_t *cur, /* btree cursor */
1567 xfs_agblock_t *bno, /* output: starting block of extent */
1568 xfs_extlen_t *len, /* output: length of extent */
1569 int *stat) /* output: success/failure */
1570{
1571 xfs_alloc_block_t *block; /* btree block */
1572#ifdef DEBUG
1573 int error; /* error return value */
1574#endif
1575 int ptr; /* record number */
1576
1577 ptr = cur->bc_ptrs[0];
1578 block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
1579#ifdef DEBUG
1580 if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
1581 return error;
1582#endif
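	/*
	 * Note: btree cursor slots are 1-based, so ptr == 0 means the
	 * cursor sits before the first record.
	 */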
1583 /*
1584 * Off the right end or left end, return failure.
1585 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001586 if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 *stat = 0;
1588 return 0;
1589 }
1590 /*
1591 * Point to the record and extract its data.
1592 */
1593 {
1594 xfs_alloc_rec_t *rec; /* record data */
1595
1596 rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001597 *bno = be32_to_cpu(rec->ar_startblock);
1598 *len = be32_to_cpu(rec->ar_blockcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 }
1600 *stat = 1;
1601 return 0;
1602}
1603
1604/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 * Insert the current record at the point referenced by cur.
1606 * The cursor may be inconsistent on return if splits have been done.
1607 */
1608int /* error */
1609xfs_alloc_insert(
1610 xfs_btree_cur_t *cur, /* btree cursor */
1611 int *stat) /* success/failure */
1612{
1613 int error; /* error return value */
1614 int i; /* result value, 0 for failure */
1615 int level; /* current level number in btree */
1616 xfs_agblock_t nbno; /* new block number (split result) */
1617 xfs_btree_cur_t *ncur; /* new cursor (split result) */
1618 xfs_alloc_rec_t nrec; /* record being inserted this level */
1619 xfs_btree_cur_t *pcur; /* previous level's cursor */
1620
1621 level = 0;
1622 nbno = NULLAGBLOCK;
Christoph Hellwig16259e72005-11-02 15:11:25 +11001623 nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
1624 nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
Nathan Scott1121b212006-09-28 10:58:40 +10001625 ncur = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 pcur = cur;
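	/*
	 * nbno/nrec carry the block number and first key of any block
	 * created by a split up to the next level; ncur is the extra
	 * cursor a split hands back, and pcur is whichever cursor the
	 * current level should be inserted through.
	 */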
1627 /*
1628 * Loop going up the tree, starting at the leaf level.
 1629 * Stop when we don't get a split block; that means the insert
 1630 * is finished at this level.
1631 */
1632 do {
1633 /*
1634 * Insert nrec/nbno into this level of the tree.
1635 * Note if we fail, nbno will be null.
1636 */
1637 if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
1638 &i))) {
1639 if (pcur != cur)
1640 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
1641 return error;
1642 }
1643 /*
1644 * See if the cursor we just used is trash.
 1645 * Can't trash the caller's cursor, but otherwise we should trash
 1646 * it if ncur is a new cursor or we're about to be done.
1647 */
1648 if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
1649 cur->bc_nlevels = pcur->bc_nlevels;
1650 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
1651 }
1652 /*
1653 * If we got a new cursor, switch to it.
1654 */
1655 if (ncur) {
1656 pcur = ncur;
Nathan Scott1121b212006-09-28 10:58:40 +10001657 ncur = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 }
1659 } while (nbno != NULLAGBLOCK);
1660 *stat = i;
1661 return 0;
1662}
1663
Christoph Hellwig561f7d12008-10-30 16:53:59 +11001664STATIC struct xfs_btree_cur *
1665xfs_allocbt_dup_cursor(
1666 struct xfs_btree_cur *cur)
1667{
1668 return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
1669 cur->bc_private.a.agbp, cur->bc_private.a.agno,
1670 cur->bc_btnum);
1671}
1672
Christoph Hellwig278d0ca2008-10-30 16:56:32 +11001673/*
1674 * Update the longest extent in the AGF
1675 */
1676STATIC void
1677xfs_allocbt_update_lastrec(
1678 struct xfs_btree_cur *cur,
1679 struct xfs_btree_block *block,
1680 union xfs_btree_rec *rec,
1681 int ptr,
1682 int reason)
1683{
1684 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
1685 xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
1686 __be32 len;
1687
1688 ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
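	/*
	 * Only the by-count btree keeps its records sorted by extent
	 * length, so only its last record can shadow the AGF's idea of
	 * the longest free extent in the AG.
	 */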
1689
1690 switch (reason) {
1691 case LASTREC_UPDATE:
1692 /*
1693 * If this is the last leaf block and it's the last record,
1694 * then update the size of the longest extent in the AG.
1695 */
1696 if (ptr != xfs_btree_get_numrecs(block))
1697 return;
1698 len = rec->alloc.ar_blockcount;
1699 break;
1700 default:
1701 ASSERT(0);
1702 return;
1703 }
1704
1705 agf->agf_longest = len;
1706 cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
1707 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
1708}
1709
Christoph Hellwigce5e42d2008-10-30 16:55:23 +11001710STATIC int
1711xfs_allocbt_get_maxrecs(
1712 struct xfs_btree_cur *cur,
1713 int level)
1714{
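	/* m_alloc_mxr[0] is the leaf maximum, [1] the node maximum. */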
1715 return cur->bc_mp->m_alloc_mxr[level != 0];
1716}
1717
Christoph Hellwigfe033cc2008-10-30 16:56:09 +11001718STATIC void
1719xfs_allocbt_init_key_from_rec(
1720 union xfs_btree_key *key,
1721 union xfs_btree_rec *rec)
1722{
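	/*
	 * agbno 0 always holds AG metadata (superblock copy, AGF, ...),
	 * so a free-extent record can never start there.
	 */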
1723 ASSERT(rec->alloc.ar_startblock != 0);
1724
1725 key->alloc.ar_startblock = rec->alloc.ar_startblock;
1726 key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
1727}
1728
1729STATIC void
1730xfs_allocbt_init_ptr_from_cur(
1731 struct xfs_btree_cur *cur,
1732 union xfs_btree_ptr *ptr)
1733{
1734 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
1735
1736 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
1737 ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
1738
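	/* The AGF carries the root block of each AG btree, indexed by btnum. */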
1739 ptr->s = agf->agf_roots[cur->bc_btnum];
1740}
1741
1742STATIC __int64_t
1743xfs_allocbt_key_diff(
1744 struct xfs_btree_cur *cur,
1745 union xfs_btree_key *key)
1746{
1747 xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
1748 xfs_alloc_key_t *kp = &key->alloc;
1749 __int64_t diff;
1750
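	/*
	 * The by-block btree orders records by startblock alone; the
	 * by-count btree orders by (blockcount, startblock).  A positive
	 * return means the given key sorts after the cursor's record.
	 */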
1751 if (cur->bc_btnum == XFS_BTNUM_BNO) {
1752 return (__int64_t)be32_to_cpu(kp->ar_startblock) -
1753 rec->ar_startblock;
1754 }
1755
1756 diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
1757 if (diff)
1758 return diff;
1759
1760 return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
1761}
1762
Christoph Hellwig8c4ed632008-10-30 16:55:13 +11001763#ifdef XFS_BTREE_TRACE
1764ktrace_t *xfs_allocbt_trace_buf;
1765
1766STATIC void
1767xfs_allocbt_trace_enter(
1768 struct xfs_btree_cur *cur,
1769 const char *func,
1770 char *s,
1771 int type,
1772 int line,
1773 __psunsigned_t a0,
1774 __psunsigned_t a1,
1775 __psunsigned_t a2,
1776 __psunsigned_t a3,
1777 __psunsigned_t a4,
1778 __psunsigned_t a5,
1779 __psunsigned_t a6,
1780 __psunsigned_t a7,
1781 __psunsigned_t a8,
1782 __psunsigned_t a9,
1783 __psunsigned_t a10)
1784{
1785 ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
1786 (void *)func, (void *)s, NULL, (void *)cur,
1787 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
1788 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
1789 (void *)a8, (void *)a9, (void *)a10);
1790}
1791
1792STATIC void
1793xfs_allocbt_trace_cursor(
1794 struct xfs_btree_cur *cur,
1795 __uint32_t *s0,
1796 __uint64_t *l0,
1797 __uint64_t *l1)
1798{
1799 *s0 = cur->bc_private.a.agno;
1800 *l0 = cur->bc_rec.a.ar_startblock;
1801 *l1 = cur->bc_rec.a.ar_blockcount;
1802}
1803
1804STATIC void
1805xfs_allocbt_trace_key(
1806 struct xfs_btree_cur *cur,
1807 union xfs_btree_key *key,
1808 __uint64_t *l0,
1809 __uint64_t *l1)
1810{
1811 *l0 = be32_to_cpu(key->alloc.ar_startblock);
1812 *l1 = be32_to_cpu(key->alloc.ar_blockcount);
1813}
1814
1815STATIC void
1816xfs_allocbt_trace_record(
1817 struct xfs_btree_cur *cur,
1818 union xfs_btree_rec *rec,
1819 __uint64_t *l0,
1820 __uint64_t *l1,
1821 __uint64_t *l2)
1822{
1823 *l0 = be32_to_cpu(rec->alloc.ar_startblock);
1824 *l1 = be32_to_cpu(rec->alloc.ar_blockcount);
1825 *l2 = 0;
1826}
1827#endif /* XFS_BTREE_TRACE */
1828
Christoph Hellwig561f7d12008-10-30 16:53:59 +11001829static const struct xfs_btree_ops xfs_allocbt_ops = {
Christoph Hellwig65f1eae2008-10-30 16:55:34 +11001830 .rec_len = sizeof(xfs_alloc_rec_t),
1831 .key_len = sizeof(xfs_alloc_key_t),
1832
Christoph Hellwig561f7d12008-10-30 16:53:59 +11001833 .dup_cursor = xfs_allocbt_dup_cursor,
Christoph Hellwig278d0ca2008-10-30 16:56:32 +11001834 .update_lastrec = xfs_allocbt_update_lastrec,
Christoph Hellwigce5e42d2008-10-30 16:55:23 +11001835 .get_maxrecs = xfs_allocbt_get_maxrecs,
Christoph Hellwigfe033cc2008-10-30 16:56:09 +11001836 .init_key_from_rec = xfs_allocbt_init_key_from_rec,
1837 .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
1838 .key_diff = xfs_allocbt_key_diff,
Christoph Hellwig8c4ed632008-10-30 16:55:13 +11001839
1840#ifdef XFS_BTREE_TRACE
1841 .trace_enter = xfs_allocbt_trace_enter,
1842 .trace_cursor = xfs_allocbt_trace_cursor,
1843 .trace_key = xfs_allocbt_trace_key,
1844 .trace_record = xfs_allocbt_trace_record,
1845#endif
Christoph Hellwig561f7d12008-10-30 16:53:59 +11001846};
1847
1848/*
1849 * Allocate a new allocation btree cursor.
1850 */
1851struct xfs_btree_cur * /* new alloc btree cursor */
1852xfs_allocbt_init_cursor(
1853 struct xfs_mount *mp, /* file system mount point */
1854 struct xfs_trans *tp, /* transaction pointer */
1855 struct xfs_buf *agbp, /* buffer for agf structure */
1856 xfs_agnumber_t agno, /* allocation group number */
1857 xfs_btnum_t btnum) /* btree identifier */
1858{
1859 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
1860 struct xfs_btree_cur *cur;
1861
1862 ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
1863
1864 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
1865
1866 cur->bc_tp = tp;
1867 cur->bc_mp = mp;
1868 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
1869 cur->bc_btnum = btnum;
1870 cur->bc_blocklog = mp->m_sb.sb_blocklog;
1871
1872 cur->bc_ops = &xfs_allocbt_ops;
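	/*
	 * Only the by-count btree needs LASTREC_UPDATE: its highest
	 * record mirrors the longest free extent cached in the AGF (see
	 * xfs_allocbt_update_lastrec above).
	 */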
Christoph Hellwig278d0ca2008-10-30 16:56:32 +11001873 if (btnum == XFS_BTNUM_CNT)
1874 cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
Christoph Hellwig561f7d12008-10-30 16:53:59 +11001875
1876 cur->bc_private.a.agbp = agbp;
1877 cur->bc_private.a.agno = agno;
1878
1879 return cur;
1880}