/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Now we have all the buffers that must be used in balancing of the tree.
 * Further calculations cannot cause schedule(), so the buffer tree will
 * stay stable until the balancing is finished.  Balance the tree according
 * to the analysis made before, using the buffers obtained above.
 */

#include <asm/uaccess.h>
#include <linux/time.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/kernel.h>

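/*
 * The buffer_info_init_*() helpers below describe a node taking part in
 * the balance: the buffer itself, its parent buffer and the position of
 * the node in that parent, so the leaf_* primitives can update the
 * parent's child size information when they modify the node.
 */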
static inline void buffer_info_init_left(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->L[0];
	bi->bi_parent = tb->FL[0];
	bi->bi_position = get_left_neighbor_position(tb, 0);
}

static inline void buffer_info_init_right(struct tree_balance *tb,
					  struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->R[0];
	bi->bi_parent = tb->FR[0];
	bi->bi_position = get_right_neighbor_position(tb, 0);
}

static inline void buffer_info_init_tbS0(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
	bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
	bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
}

static inline void buffer_info_init_bh(struct tree_balance *tb,
				       struct buffer_info *bi,
				       struct buffer_head *bh)
{
	bi->tb = tb;
	bi->bi_bh = bh;
	bi->bi_parent = NULL;
	bi->bi_position = 0;
}

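/*
 * Leaf, internal and super block buffers are all marked dirty the same
 * way: log the buffer in the running transaction.  The 'flag' argument
 * is unused here.
 */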
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle, bh);
}

#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/*
 * summary:
 *  if deleting something ( tb->insert_size[0] < 0 )
 *	return(balance_leaf_when_delete()); (flag d handled here)
 *  else
 *	if lnum is larger than 0 we put items into the left node
 *	if rnum is larger than 0 we put items into the right node
 *	if snum1 is larger than 0 we put items into the new node s1
 *	if snum2 is larger than 0 we put items into the new node s2
 * Note that all *num* count new items being created.
 */

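/*
 * Delete the whole item at item_pos from S[0] and, if the leftmost item
 * was removed, update the left delimiting key in the common parent.
 */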
static void balance_leaf_when_delete_del(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct buffer_info bi;
#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih = item_head(tbS0, item_pos);
#endif

	RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
	       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
	       -tb->insert_size[0], ih);

	buffer_info_init_tbS0(tb, &bi);
	leaf_delete_items(&bi, 0, item_pos, 1, -1);

	if (!item_pos && tb->CFL[0]) {
		if (B_NR_ITEMS(tbS0)) {
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
		} else {
			if (!PATH_H_POSITION(tb->tb_path, 1))
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    PATH_H_PPARENT(tb->tb_path, 0), 0);
		}
	}

	RFALSE(!item_pos && !tb->CFL[0],
	       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
	       tb->L[0]);
}

/* cut item in S[0] */
static void balance_leaf_when_delete_cut(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct item_head *ih = item_head(tbS0, item_pos);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	buffer_info_init_tbS0(tb, &bi);

	if (is_direntry_le_ih(ih)) {
		/*
		 * UFS unlink semantics are such that you can only
		 * delete one directory entry at a time.
		 *
		 * when we cut a directory tb->insert_size[0] means
		 * number of entries to be cut (always 1)
		 */
		tb->insert_size[0] = -1;
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
		       "PAP-12030: can not change delimiting key. CFL[0]=%p",
		       tb->CFL[0]);

		if (!item_pos && !pos_in_item && tb->CFL[0])
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
	} else {
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!ih_item_len(ih),
		       "PAP-12035: cut must leave non-zero dynamic "
		       "length of item");
	}
}

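/*
 * L[0] takes part in the balance (lnum[0] != 0): S[0] is either joined
 * entirely with a neighbor or split between L[0] and R[0].  Returns 0,
 * or -1 when the contents of all three buffers end up in R[0].
 */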
static int balance_leaf_when_delete_left(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* L[0] must be joined with S[0] */
	if (tb->lnum[0] == -1) {
		/* R[0] must be also joined with S[0] */
		if (tb->rnum[0] == -1) {
			if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
				/*
				 * all contents of all the
				 * 3 buffers will be in L[0]
				 */
				if (PATH_H_POSITION(tb->tb_path, 1) == 0 &&
				    1 < B_NR_ITEMS(tb->FR[0]))
					replace_key(tb, tb->CFL[0],
						    tb->lkey[0], tb->FR[0], 1);

				leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_R_TO_L, tb,
						B_NR_ITEMS(tb->R[0]), -1,
						NULL);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->R[0]);

				return 0;
			}

			/* all contents of all the 3 buffers will be in R[0] */
			leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL);
			leaf_move_items(LEAF_FROM_L_TO_R, tb,
					B_NR_ITEMS(tb->L[0]), -1, NULL);

			/* right_delimiting_key is correct in R[0] */
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

			reiserfs_invalidate_buffer(tb, tbS0);
			reiserfs_invalidate_buffer(tb, tb->L[0]);

			return -1;
		}

		RFALSE(tb->rnum[0] != 0,
		       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
		/* all contents of L[0] and S[0] will be in L[0] */
		leaf_shift_left(tb, n, -1);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	/*
	 * a part of contents of S[0] will be in L[0] and
	 * the rest part of S[0] will be in R[0]
	 */

	RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
	       (tb->lnum[0] + tb->rnum[0] > n + 1),
	       "PAP-12050: rnum(%d) and lnum(%d) and item "
	       "number(%d) in S[0] are not consistent",
	       tb->rnum[0], tb->lnum[0], n);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
	       (tb->lbytes != -1 || tb->rbytes != -1),
	       "PAP-12055: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are not split",
	       tb->rbytes, tb->lbytes);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
	       (tb->lbytes < 1 || tb->rbytes != -1),
	       "PAP-12060: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are split",
	       tb->rbytes, tb->lbytes);

	leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

	reiserfs_invalidate_buffer(tb, tbS0);

	return 0;
}

/*
 * Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of
 *	   items to the neighbor
 */
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct buffer_info bi;
	int n;
	struct item_head *ih;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	ih = item_head(tbS0, item_pos);
	buffer_info_init_tbS0(tb, &bi);

	/* Delete or truncate the item */

	BUG_ON(flag != M_DELETE && flag != M_CUT);
	if (flag == M_DELETE)
		balance_leaf_when_delete_del(tb);
	else /* M_CUT */
		balance_leaf_when_delete_cut(tb);

	/*
	 * the rule is that no shifting occurs unless by shifting
	 * a node can be freed
	 */
	n = B_NR_ITEMS(tbS0);

	/* L[0] takes part in balancing */
	if (tb->lnum[0])
		return balance_leaf_when_delete_left(tb);

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}

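/*
 * Insert the new item, or the part of it that fits, into the left
 * neighbor L[0] as items are shifted there from S[0].
 */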
static void balance_leaf_insert_left(struct tree_balance *tb,
				     struct item_head *ih, const char *body)
{
	int ret;
	struct buffer_info bi;
	int n = B_NR_ITEMS(tb->L[0]);

	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
		/* part of new item falls into L[0] */
		int new_item_len, shift;
		int version;

		ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);

		/* Calculate item length to insert to S[0] */
		new_item_len = ih_item_len(ih) - tb->lbytes;

		/* Calculate and check item length to insert to L[0] */
		put_ih_item_len(ih, ih_item_len(ih) - new_item_len);

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12080: there is nothing to insert into L[0]: "
		       "ih_item_len=%d", ih_item_len(ih));

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
			     min_t(int, tb->zeroes_num, ih_item_len(ih)));

		version = ih_version(ih);

		/*
		 * Calculate key component, item length and body to
		 * insert into S[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;

		add_le_ih_k_offset(ih, tb->lbytes << shift);

		put_ih_item_len(ih, new_item_len);
		if (tb->lbytes > tb->zeroes_num) {
			body += (tb->lbytes - tb->zeroes_num);
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= tb->lbytes;

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12085: there is nothing to insert into S[0]: "
		       "ih_item_len=%d", ih_item_len(ih));
	} else {
		/* new item in whole falls into L[0] */
		/* Shift lnum[0]-1 items to L[0] */
		ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
				     tb->zeroes_num);
		tb->insert_size[0] = 0;
		tb->zeroes_num = 0;
	}
}

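/*
 * Paste a new directory entry into L[0] when it falls among the entries
 * shifted there; otherwise only shift the entries and adjust pos_in_item
 * for the later paste.
 */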
static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
						 struct item_head *ih,
						 const char *body)
{
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;

	RFALSE(tb->zeroes_num,
	       "PAP-12090: invalid parameter in case of a directory");

	/* directory item */
	if (tb->lbytes > tb->pos_in_item) {
		/* new directory entry falls into L[0] */
		struct item_head *pasted;
		int ret, l_pos_in_item = tb->pos_in_item;

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 entries from given directory item
		 */
		ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
		if (ret && !tb->item_pos) {
			pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
			l_pos_in_item += ih_entry_count(pasted) -
					 (tb->lbytes - 1);
		}

		/* Append given directory entry to directory item */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     l_pos_in_item, tb->insert_size[0],
				     body, tb->zeroes_num);

		/*
		 * the previous call prepared space for the new entry,
		 * the following call pastes this entry
		 */

		/*
		 * when the directory item has been merged, pos_in_item
		 * has been changed too
		 */

		/* paste new directory entry. 1 is entry number */
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   l_pos_in_item, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);
		tb->insert_size[0] = 0;
	} else {
		/* new directory entry doesn't fall into L[0] */
		/*
		 * Shift lnum[0]-1 items in whole. Shift lbytes
		 * directory entries from directory item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}

	/* Calculate new position to append in item body */
	tb->pos_in_item -= tb->lbytes;
}

static void balance_leaf_paste_left_shift(struct tree_balance *tb,
					  struct item_head *ih,
					  const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;

	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
		balance_leaf_paste_left_shift_dirent(tb, ih, body);
		return;
	}

	RFALSE(tb->lbytes <= 0,
	       "PAP-12095: there is nothing to shift to L[0]. "
	       "lbytes=%d", tb->lbytes);
	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
	       "PAP-12100: incorrect position to paste: "
	       "item_len=%d, pos_in_item=%d",
	       ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item);

	/* appended item will be in L[0] in whole */
	if (tb->lbytes >= tb->pos_in_item) {
		struct item_head *tbS0_pos_ih, *tbL0_ih;
		struct item_head *tbS0_0_ih;
		struct reiserfs_key *left_delim_key;
		int ret, l_n, version, temp_l;

		tbS0_pos_ih = item_head(tbS0, tb->item_pos);
		tbS0_0_ih = item_head(tbS0, 0);

		/*
		 * this bytes number must be appended
		 * to the last item of L[h]
		 */
		l_n = tb->lbytes - tb->pos_in_item;

		/* Calculate new insert_size[0] */
		tb->insert_size[0] -= l_n;

		RFALSE(tb->insert_size[0] <= 0,
		       "PAP-12105: there is nothing to paste into "
		       "L[0]. insert_size=%d", tb->insert_size[0]);

		ret = leaf_shift_left(tb, tb->lnum[0],
				      ih_item_len(tbS0_pos_ih));

		tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret);

		/* Append to body of item in L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     ih_item_len(tbL0_ih), l_n, body,
				     min_t(int, l_n, tb->zeroes_num));

		/*
		 * 0-th item in S0 can be only of DIRECT type
		 * when l_n != 0
		 */
		temp_l = l_n;

		RFALSE(ih_item_len(tbS0_0_ih),
		       "PAP-12106: item length must be 0");
		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
		       leaf_key(tb->L[0], n + tb->item_pos - ret)),
		       "PAP-12107: items must be of the same file");

		if (is_indirect_le_ih(tbL0_ih)) {
			int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
			temp_l = l_n << shift;
		}
		/* update key of first item in S0 */
		version = ih_version(tbS0_0_ih);
		add_le_key_k_offset(version, &tbS0_0_ih->ih_key, temp_l);

		/* update left delimiting key */
		left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]);
		add_le_key_k_offset(version, left_delim_key, temp_l);

		/*
		 * Calculate new body, position in item and
		 * insert_size[0]
		 */
		if (l_n > tb->zeroes_num) {
			body += (l_n - tb->zeroes_num);
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= l_n;
		tb->pos_in_item = 0;

		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
		       leaf_key(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1)) ||
		       !op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size) ||
		       !op_is_left_mergeable(left_delim_key, tbS0->b_size),
		       "PAP-12120: item must be merge-able with left "
		       "neighboring item");
	} else {
		/* only part of the appended item will be in L[0] */

		/* Calculate position in item for append in S[0] */
		tb->pos_in_item -= tb->lbytes;

		RFALSE(tb->pos_in_item <= 0,
		       "PAP-12125: no place for paste. pos_in_item=%d",
		       tb->pos_in_item);

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 byte from item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}
}

/* appended item will be in L[0] in whole */
static void balance_leaf_paste_left_whole(struct tree_balance *tb,
					  struct item_head *ih,
					  const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;
	struct item_head *pasted;
	int ret;

	/* if we paste into first item of S[0] and it is left mergeable */
	if (!tb->item_pos &&
	    op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size)) {
		/*
		 * then increment pos_in_item by the size of the
		 * last item in L[0]
		 */
		pasted = item_head(tb->L[0], n - 1);
		if (is_direntry_le_ih(pasted))
			tb->pos_in_item += ih_entry_count(pasted);
		else
			tb->pos_in_item += ih_item_len(pasted);
	}

	/*
	 * Shift lnum[0] - 1 items in whole.
	 * Shift lbytes - 1 byte from item number lnum[0]
	 */
	ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);

	/* Append to body of item in L[0] */
	buffer_info_init_left(tb, &bi);
	leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item,
			     tb->insert_size[0], body, tb->zeroes_num);

	/* if appended item is directory, paste entry */
	pasted = item_head(tb->L[0], n + tb->item_pos - ret);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/*
	 * if appended item is indirect item, put unformatted node
	 * into un list
	 */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->insert_size[0] = 0;
	tb->zeroes_num = 0;
}

static void balance_leaf_paste_left(struct tree_balance *tb,
				    struct item_head *ih, const char *body)
{
	/* we must shift the part of the appended item */
	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
		balance_leaf_paste_left_shift(tb, ih, body);
	else
		balance_leaf_paste_left_whole(tb, ih, body);
}

/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
			      const char *body, int flag)
{
	if (tb->lnum[0] <= 0)
		return;

	/* new item or part of it falls into L[0], shift it too */
	if (tb->item_pos < tb->lnum[0]) {
		BUG_ON(flag != M_INSERT && flag != M_PASTE);

		if (flag == M_INSERT)
			balance_leaf_insert_left(tb, ih, body);
		else /* M_PASTE */
			balance_leaf_paste_left(tb, ih, body);
	} else
		/* new item doesn't fall into L[0] */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
}

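/*
 * Insert the new item, or the part of it that fits, into the right
 * neighbor R[0] as items are shifted there from S[0].
 */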
static void balance_leaf_insert_right(struct tree_balance *tb,
				      struct item_head *ih, const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;
	int ret;

	/* new item or part of it doesn't fall into R[0] */
	if (n - tb->rnum[0] >= tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* new item or its part falls to R[0] */

	/* part of new item falls into R[0] */
	if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
		loff_t old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int version, shift;
		loff_t offset;

		leaf_shift_right(tb, tb->rnum[0] - 1, -1);

		version = ih_version(ih);

		/* Remember key component and item length */
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into R[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift);
		set_le_ih_k_offset(ih, offset);
		put_ih_item_len(ih, tb->rbytes);

		/* Insert part of the item into R[0] */
		buffer_info_init_right(tb, &bi);
		if ((old_len - tb->rbytes) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->rbytes) - tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num -
					  (old_len - tb->rbytes);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/* Replace right delimiting key by first key in R[0] */
		replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		/*
		 * Calculate key component and item length to
		 * insert into S[0]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->rbytes);

		tb->insert_size[0] -= tb->rbytes;

	} else {
		/* whole new item falls into R[0] */

		/* Shift rnum[0]-1 items to R[0] */
		ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);

		/* Insert new item into R[0] */
		buffer_info_init_right(tb, &bi);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1,
				     ih, body, tb->zeroes_num);

		if (tb->item_pos - n + tb->rnum[0] - 1 == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}

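/*
 * Paste a new directory entry into R[0] when it falls among the entries
 * shifted there; otherwise only the shift to R[0] is done here.
 */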
static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
				struct item_head *ih, const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	int entry_count;

	RFALSE(tb->zeroes_num,
	       "PAP-12145: invalid parameter in case of a directory");
	entry_count = ih_entry_count(item_head(tbS0, tb->item_pos));

	/* new directory entry falls into R[0] */
	if (entry_count - tb->rbytes < tb->pos_in_item) {
		int paste_entry_position;

		RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0],
		       "PAP-12150: no enough of entries to shift to R[0]: "
		       "rbytes=%d, entry_count=%d", tb->rbytes, entry_count);

		/*
		 * Shift rnum[0]-1 items in whole.
		 * Shift rbytes-1 directory entries from directory
		 * item number rnum[0]
		 */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);

		/* Paste given directory entry to directory item */
		paste_entry_position = tb->pos_in_item - entry_count +
				       tb->rbytes - 1;
		buffer_info_init_right(tb, &bi);
		leaf_paste_in_buffer(&bi, 0, paste_entry_position,
				     tb->insert_size[0], body, tb->zeroes_num);

		/* paste entry */
		leaf_paste_entries(&bi, 0, paste_entry_position, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		/* change delimiting keys */
		if (paste_entry_position == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into R[0] */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
	}
}

static void balance_leaf_paste_right_shift(struct tree_balance *tb,
				struct item_head *ih, const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n_shift, n_rem, r_zeroes_number, version;
	unsigned long temp_rem;
	const char *r_body;
	struct buffer_info bi;

	/* we append to directory item */
	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
		balance_leaf_paste_right_shift_dirent(tb, ih, body);
		return;
	}

	/* regular object */

	/*
	 * Calculate number of bytes which must be shifted
	 * from appended item
	 */
	n_shift = tb->rbytes - tb->insert_size[0];
	if (n_shift < 0)
		n_shift = 0;

	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
	       "PAP-12155: invalid position to paste. ih_item_len=%d, "
	       "pos_in_item=%d", tb->pos_in_item,
	       ih_item_len(item_head(tbS0, tb->item_pos)));

	leaf_shift_right(tb, tb->rnum[0], n_shift);

	/*
	 * Calculate number of bytes which must remain in body
	 * after appending to R[0]
	 */
	n_rem = tb->insert_size[0] - tb->rbytes;
	if (n_rem < 0)
		n_rem = 0;

	temp_rem = n_rem;

	version = ih_version(item_head(tb->R[0], 0));

	if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) {
		int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		temp_rem = n_rem << shift;
	}

	add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem);
	add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]),
			    temp_rem);

	do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);

	/* Append part of body into R[0] */
	buffer_info_init_right(tb, &bi);
	if (n_rem > tb->zeroes_num) {
		r_zeroes_number = 0;
		r_body = body + n_rem - tb->zeroes_num;
	} else {
		r_body = body;
		r_zeroes_number = tb->zeroes_num - n_rem;
		tb->zeroes_num -= r_zeroes_number;
	}

	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
			     r_body, r_zeroes_number);

	if (is_indirect_le_ih(item_head(tb->R[0], 0)))
		set_ih_free_space(item_head(tb->R[0], 0), 0);

	tb->insert_size[0] = n_rem;
	if (!n_rem)
		tb->pos_in_item++;
}

static void balance_leaf_paste_right_whole(struct tree_balance *tb,
				struct item_head *ih, const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct item_head *pasted;
	struct buffer_info bi;

	buffer_info_init_right(tb, &bi);
	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

	/* append item in R[0] */
	if (tb->pos_in_item >= 0) {
		buffer_info_init_right(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0],
				     tb->pos_in_item, tb->insert_size[0], body,
				     tb->zeroes_num);
	}

	/* paste new entry, if item is directory item */
	pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]);
	if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) {
		leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0],
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

		if (!tb->pos_in_item) {

			RFALSE(tb->item_pos - n + tb->rnum[0],
			       "PAP-12165: directory item must be first "
			       "item of node when pasting is in 0th position");

			/* update delimiting keys */
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
		}
	}

	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);
	tb->zeroes_num = tb->insert_size[0] = 0;
}

static void balance_leaf_paste_right(struct tree_balance *tb,
				     struct item_head *ih, const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* new item doesn't fall into R[0] */
	if (n - tb->rnum[0] > tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* pasted item or part of it falls to R[0] */

	if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
		/* we must shift the part of the appended item */
		balance_leaf_paste_right_shift(tb, ih, body);
	else
		/* pasted item in whole falls into R[0] */
		balance_leaf_paste_right_whole(tb, ih, body);
}

/* shift rnum[0] items from S[0] to the right neighbor R[0] */
static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
			       const char *body, int flag)
{
	if (tb->rnum[0] <= 0)
		return;

	BUG_ON(flag != M_INSERT && flag != M_PASTE);

	if (flag == M_INSERT)
		balance_leaf_insert_right(tb, ih, body);
	else /* M_PASTE */
		balance_leaf_paste_right(tb, ih, body);
}

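/*
 * Insert the new item, or the part of it that fits, into the new node
 * S_new[i] as items are moved there from S[0].
 */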
static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
					  struct item_head *ih,
					  const char *body,
					  struct item_head *insert_key,
					  struct buffer_head **insert_ptr,
					  int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;
	int shift;

	/* new item or its part doesn't fall into S_new[i] */
	if (n - tb->snum[i] >= tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* new item or its part falls to first new node S_new[i] */

	/* part of new item falls into S_new[i] */
	if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
		int old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int version;

		/* Move snum[i]-1 items from S[0] to S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
				tb->S_new[i]);

		/* Remember key component and item length */
		version = ih_version(ih);
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into S_new[i]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		set_le_ih_k_offset(ih,
				   le_ih_k_offset(ih) +
				   ((old_len - tb->sbytes[i]) << shift));

		put_ih_item_len(ih, tb->sbytes[i]);

		/* Insert part of the item into S_new[i] before 0-th item */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);

		if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->sbytes[i]) -
				 tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num - (old_len -
					  tb->sbytes[i]);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/*
		 * Calculate key component and item length to
		 * insert into S[i]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->sbytes[i]);
		tb->insert_size[0] -= tb->sbytes[i];
	} else {
		/* whole new item falls into S_new[i] */

		/*
		 * Shift snum[0] - 1 items to S_new[i]
		 * (sbytes[i] of split item)
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);

		/* Insert new item into S_new[i] */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
				     ih, body, tb->zeroes_num);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}

/* we append to directory item */
static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
						struct item_head *ih,
						const char *body,
						struct item_head *insert_key,
						struct buffer_head **insert_ptr,
						int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int entry_count = ih_entry_count(aux_ih);
	struct buffer_info bi;

	if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
	    tb->pos_in_item <= entry_count) {
		/* new directory entry falls into S_new[i] */

		RFALSE(!tb->insert_size[0],
		       "PAP-12215: insert_size is already 0");
		RFALSE(tb->sbytes[i] - 1 >= entry_count,
		       "PAP-12220: there are no so much entries (%d), only %d",
		       tb->sbytes[i] - 1, entry_count);

		/*
		 * Shift snum[i]-1 items in whole.
		 * Shift sbytes[i] directory entries
		 * from directory item number snum[i]
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i] - 1, tb->S_new[i]);

		/*
		 * Paste given directory entry to
		 * directory item
		 */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
				     tb->sbytes[i] - 1, tb->insert_size[0],
				     body, tb->zeroes_num);

		/* paste new directory entry */
		leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
				   tb->sbytes[i] - 1, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i], tb->S_new[i]);
	}
}

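/*
 * Paste into S_new[i] when only part of the item being appended to is
 * moved into the new node.
 */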
static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
					       struct item_head *ih,
					       const char *body,
					       struct item_head *insert_key,
					       struct buffer_head **insert_ptr,
					       int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int n_shift, n_rem, r_zeroes_number, shift;
	const char *r_body;
	struct item_head *tmp;
	struct buffer_info bi;

	RFALSE(ih, "PAP-12210: ih must be 0");

	if (is_direntry_le_ih(aux_ih)) {
		balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
						    insert_ptr, i);
		return;
	}

	/* regular object */

	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
	       tb->insert_size[0] <= 0,
	       "PAP-12225: item too short or insert_size <= 0");

	/*
	 * Calculate number of bytes which must be shifted from appended item
	 */
	n_shift = tb->sbytes[i] - tb->insert_size[0];
	if (n_shift < 0)
		n_shift = 0;
	leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
			tb->S_new[i]);

	/*
	 * Calculate number of bytes which must remain in body after
	 * append to S_new[i]
	 */
	n_rem = tb->insert_size[0] - tb->sbytes[i];
	if (n_rem < 0)
		n_rem = 0;

	/* Append part of body into S_new[0] */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	if (n_rem > tb->zeroes_num) {
		r_zeroes_number = 0;
		r_body = body + n_rem - tb->zeroes_num;
	} else {
		r_body = body;
		r_zeroes_number = tb->zeroes_num - n_rem;
		tb->zeroes_num -= r_zeroes_number;
	}

	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
			     r_body, r_zeroes_number);

	tmp = item_head(tb->S_new[i], 0);
	shift = 0;
	if (is_indirect_le_ih(tmp)) {
		set_ih_free_space(tmp, 0);
		shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
	}
	add_le_ih_k_offset(tmp, n_rem << shift);

	tb->insert_size[0] = n_rem;
	if (!n_rem)
		tb->pos_in_item++;
}

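/* Paste when the item being appended to falls wholly into S_new[i]. */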
static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
					       struct item_head *ih,
					       const char *body,
					       struct item_head *insert_key,
					       struct buffer_head **insert_ptr,
					       int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	int leaf_mi;
	struct item_head *pasted;
	struct buffer_info bi;

#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih_check = item_head(tbS0, tb->item_pos);

	if (!is_direntry_le_ih(ih_check) &&
	    (tb->pos_in_item != ih_item_len(ih_check) ||
	    tb->insert_size[0] <= 0))
		reiserfs_panic(tb->tb_sb,
			       "PAP-12235",
			       "pos_in_item must be equal to ih_item_len");
#endif

	leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				  tb->sbytes[i], tb->S_new[i]);

	RFALSE(leaf_mi,
	       "PAP-12240: unexpected value returned by leaf_move_items (%d)",
	       leaf_mi);

	/* paste into item */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
			     tb->pos_in_item, tb->insert_size[0],
			     body, tb->zeroes_num);

	pasted = item_head(tb->S_new[i], tb->item_pos - n + tb->snum[i]);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/* if we paste to indirect item update ih_free_space */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->zeroes_num = tb->insert_size[0] = 0;
}

static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
					 struct item_head *ih,
					 const char *body,
					 struct item_head *insert_key,
					 struct buffer_head **insert_ptr,
					 int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* pasted item doesn't fall into S_new[i] */
	if (n - tb->snum[i] > tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* pasted item or part of it falls to S_new[i] */

	if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
		/* we must shift part of the appended item */
		balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
						   insert_ptr, i);
	else
		/* item falls wholly into S_new[i] */
		balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
						   insert_ptr, i);
}

/* Fill new nodes that appear in place of S[0] */
static void balance_leaf_new_nodes(struct tree_balance *tb,
				   struct item_head *ih,
				   const char *body,
				   struct item_head *insert_key,
				   struct buffer_head **insert_ptr,
				   int flag)
{
	int i;
	for (i = tb->blknum[0] - 2; i >= 0; i--) {
		BUG_ON(flag != M_INSERT && flag != M_PASTE);

		RFALSE(!tb->snum[i],
		       "PAP-12200: snum[%d] == %d. Must be > 0", i,
		       tb->snum[i]);

		/* here we shift from S to S_new nodes */

		tb->S_new[i] = get_FEB(tb);

		/* initialize block type and tree level */
		set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL);

		if (flag == M_INSERT)
			balance_leaf_new_nodes_insert(tb, ih, body, insert_key,
						      insert_ptr, i);
		else /* M_PASTE */
			balance_leaf_new_nodes_paste(tb, ih, body, insert_key,
						     insert_ptr, i);

		memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE);
		insert_ptr[i] = tb->S_new[i];

		RFALSE(!buffer_journaled(tb->S_new[i])
		       || buffer_journal_dirty(tb->S_new[i])
		       || buffer_dirty(tb->S_new[i]),
		       "PAP-12247: S_new[%d] : (%b)",
		       i, format_bh(tb->S_new[i]));
	}
}

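/* Insert the new item into what remains of S[0]. */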
static void balance_leaf_finish_node_insert(struct tree_balance *tb,
					    struct item_head *ih,
					    const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	buffer_info_init_tbS0(tb, &bi);
	leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num);

	/* If we insert the first key change the delimiting key */
	if (tb->item_pos == 0) {
		if (tb->CFL[0]) /* can be 0 in reiserfsck */
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
	}
}

static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
						  struct item_head *ih,
						  const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *pasted = item_head(tbS0, tb->item_pos);
	struct buffer_info bi;

	if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) {
		RFALSE(!tb->insert_size[0],
		       "PAP-12260: insert_size is 0 already");

		/* prepare space */
		buffer_info_init_tbS0(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item,
				     tb->insert_size[0], body, tb->zeroes_num);

		/* paste entry */
		leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

		if (!tb->item_pos && !tb->pos_in_item) {
			RFALSE(!tb->CFL[0] || !tb->L[0],
			       "PAP-12270: CFL[0]/L[0] must be specified");
			if (tb->CFL[0])
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    tbS0, 0);
		}

		tb->insert_size[0] = 0;
	}
}

static void balance_leaf_finish_node_paste(struct tree_balance *tb,
					   struct item_head *ih,
					   const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	struct item_head *pasted = item_head(tbS0, tb->item_pos);

	/* when directory, may be new entry already pasted */
	if (is_direntry_le_ih(pasted)) {
		balance_leaf_finish_node_paste_dirent(tb, ih, body);
		return;
	}

	/* regular object */

	if (tb->pos_in_item == ih_item_len(pasted)) {
		RFALSE(tb->insert_size[0] <= 0,
		       "PAP-12275: insert size must not be %d",
		       tb->insert_size[0]);
		buffer_info_init_tbS0(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos,
				     tb->pos_in_item, tb->insert_size[0], body,
				     tb->zeroes_num);

		if (is_indirect_le_ih(pasted))
			set_ih_free_space(pasted, 0);

		tb->insert_size[0] = 0;
	}
#ifdef CONFIG_REISERFS_CHECK
	else if (tb->insert_size[0]) {
		print_cur_tb("12285");
		reiserfs_panic(tb->tb_sb, "PAP-12285",
			       "insert_size must be 0 (%d)", tb->insert_size[0]);
	}
#endif
}

/*
 * if the affected item was not wholly shifted then we
 * perform all necessary operations on that part or whole
 * of the affected item which remains in S
 */
static void balance_leaf_finish_node(struct tree_balance *tb,
				     struct item_head *ih,
				     const char *body, int flag)
{
	/* if we must insert or append into buffer S[0] */
	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
		if (flag == M_INSERT)
			balance_leaf_finish_node_insert(tb, ih, body);
		else /* M_PASTE */
			balance_leaf_finish_node_paste(tb, ih, body);
	}
}

/**
 * balance_leaf - reiserfs tree balancing algorithm
 * @tb: tree balance state
 * @ih: item header of inserted item (little endian)
 * @body: body of inserted item or bytes to paste
 * @flag: i - insert, d - delete, c - cut, p - paste (see do_balance)
 * passed back:
 * @insert_key: key to insert new nodes
 * @insert_ptr: array of nodes to insert at the next level
 *
 * In our processing of one level we sometimes determine what must be
 * inserted into the next higher level. This insertion consists of a
 * key or two keys and their corresponding pointers.
 */
static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
			const char *body, int flag,
			struct item_head *insert_key,
			struct buffer_head **insert_ptr)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);

	PROC_INFO_INC(tb->tb_sb, balance_at[0]);

	/* Make balance in case insert_size[0] < 0 */
	if (tb->insert_size[0] < 0)
		return balance_leaf_when_delete(tb, flag);

	tb->item_pos = PATH_LAST_POSITION(tb->tb_path),
	tb->pos_in_item = tb->tb_path->pos_in_item,
	tb->zeroes_num = 0;
	if (flag == M_INSERT && !body)
		tb->zeroes_num = ih_item_len(ih);

	/*
	 * for indirect item pos_in_item is measured in unformatted node
	 * pointers. Recalculate to bytes
	 */
	if (flag != M_INSERT
	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
		tb->pos_in_item *= UNFM_P_SIZE;

	balance_leaf_left(tb, ih, body, flag);

	/* tb->lnum[0] > 0 */
	/* Calculate new item position */
	tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));

	balance_leaf_right(tb, ih, body, flag);

	/* tb->rnum[0] > 0 */
	RFALSE(tb->blknum[0] > 3,
	       "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
	RFALSE(tb->blknum[0] < 0,
	       "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);

	/*
	 * if while adding to a node we discover that it is possible to split
	 * it in two, and merge the left part into the left neighbor and the
	 * right part into the right neighbor, eliminating the node
	 */
	if (tb->blknum[0] == 0) { /* node S[0] is empty now */

		RFALSE(!tb->lnum[0] || !tb->rnum[0],
		       "PAP-12190: lnum and rnum must not be zero");
		/*
		 * if insertion was done before 0-th position in R[0], right
		 * delimiting key of the tb->L[0]'s and left delimiting key are
		 * not set correctly
		 */
		if (tb->CFL[0]) {
			if (!tb->CFR[0])
				reiserfs_panic(tb->tb_sb, "vs-12195",
					       "CFR not initialized");
			copy_key(internal_key(tb->CFL[0], tb->lkey[0]),
				 internal_key(tb->CFR[0], tb->rkey[0]));
			do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
		}

		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag);

	balance_leaf_finish_node(tb, ih, body, flag);

#ifdef CONFIG_REISERFS_CHECK
	if (flag == M_PASTE && tb->insert_size[0]) {
		print_cur_tb("12290");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12290", "insert_size is still not 0 (%d)",
			       tb->insert_size[0]);
	}
#endif

	/* Leaf level of the tree is balanced (end of balance_leaf) */
	return 0;
}

/* Make empty node */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0; /* Endian safe if 0 */
}

/* Get first empty buffer */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_info bi;

	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != NULL)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");

	buffer_info_init_bh(tb, &bi, tb->FEB[i]);
	make_empty_node(&bi);
	set_buffer_uptodate(tb->FEB[i]);
	tb->used[i] = tb->FEB[i];
	tb->FEB[i] = NULL;

	return tb->used[i];
}

/* This is now used because reiserfs_free_block has to be able to schedule. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb, "reiserfs-12320",
				 "called with dirty buffer");
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh); /* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "reiserfs-12321",
			 "too many thrown buffers");
}

Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001518static void free_thrown(struct tree_balance *tb)
1519{
1520 int i;
1521 b_blocknr_t blocknr;
Ahmed S. Darwish79a81ae2007-02-12 00:52:09 -08001522 for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001523 if (tb->thrown[i]) {
1524 blocknr = tb->thrown[i]->b_blocknr;
1525 if (buffer_dirty(tb->thrown[i]))
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001526 reiserfs_warning(tb->tb_sb, "reiserfs-12322",
1527 "called with dirty buffer %d",
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001528 blocknr);
1529 brelse(tb->thrown[i]); /* incremented in store_thrown */
1530 reiserfs_free_block(tb->transaction_handle, NULL,
1531 blocknr, 0);
1532 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534}
1535
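/*
 * A node that dropped out of the tree is marked free (FREE_LEVEL, zero
 * items), its dirty state is discarded and the buffer is queued on the
 * thrown list so that its block is released after balancing.
 */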
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001536void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001538 struct block_head *blkh;
1539 blkh = B_BLK_HEAD(bh);
1540 set_blkh_level(blkh, FREE_LEVEL);
1541 set_blkh_nr_item(blkh, 0);
1542
1543 clear_buffer_dirty(bh);
1544 store_thrown(tb, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545}
1546
1547/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001548void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
1549 struct buffer_head *src, int n_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550{
1551
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001552 RFALSE(dest == NULL || src == NULL,
1553 "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
1554 src, dest);
1555 RFALSE(!B_IS_KEYS_LEVEL(dest),
1556 "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
1557 dest);
1558 RFALSE(n_dest < 0 || n_src < 0,
1559 "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
1560 RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
1561 "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
1562 n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001564 if (B_IS_ITEMS_LEVEL(src))
1565 /* source buffer contains leaf node */
Jeff Mahoney4cf5f7a2014-04-23 10:00:35 -04001566 memcpy(internal_key(dest, n_dest), item_head(src, n_src),
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001567 KEY_SIZE);
1568 else
Jeff Mahoney4cf5f7a2014-04-23 10:00:35 -04001569 memcpy(internal_key(dest, n_dest), internal_key(src, n_src),
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001570 KEY_SIZE);
1571
1572 do_balance_mark_internal_dirty(tb, dest, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573}
1574
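/*
 * Position of the left neighbor's pointer in its parent at level h.
 * When S[h] is the leftmost child of its parent, the left neighbor
 * hangs off FL[h] and is that node's last pointer; otherwise it is
 * simply the preceding slot.
 */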
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001575int get_left_neighbor_position(struct tree_balance *tb, int h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001577 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
Al Viro9dce07f2008-03-29 03:07:28 +00001579 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001580 "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
1581 h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001583 if (Sh_position == 0)
1584 return B_NR_ITEMS(tb->FL[h]);
1585 else
1586 return Sh_position - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587}
1588
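/*
 * Position of the right neighbor's pointer in its parent at level h.
 * When S[h] is the rightmost child of its parent, the right neighbor
 * is the first pointer of FR[h]; otherwise it is the following slot.
 */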
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001589int get_right_neighbor_position(struct tree_balance *tb, int h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001591 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592
Al Viro9dce07f2008-03-29 03:07:28 +00001593 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001594 "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
1595 h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001597 if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
1598 return 0;
1599 else
1600 return Sh_position + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601}
1602
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603#ifdef CONFIG_REISERFS_CHECK
1604
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001605int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
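/*
 * Debug-only sanity check: an internal node touched by balancing must
 * be dirty (or journaled), and every one of its child pointers must
 * reference a valid, in-use block; otherwise the tree is corrupt and
 * we panic.
 */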
1606static void check_internal_node(struct super_block *s, struct buffer_head *bh,
1607 char *mes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001609 struct disk_child *dc;
1610 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001612 RFALSE(!bh, "PAP-12336: bh == 0");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001614 if (!bh || !B_IS_IN_TREE(bh))
1615 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001617 RFALSE(!buffer_dirty(bh) &&
1618 !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
1619 "PAP-12337: buffer (%b) must be dirty", bh);
1620 dc = B_N_CHILD(bh, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001622 for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
1623 if (!is_reusable(s, dc_block_number(dc), 1)) {
1624 print_cur_tb(mes);
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001625 reiserfs_panic(s, "PAP-12338",
1626 "invalid child pointer %y in %b",
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001627 dc, bh);
1628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 }
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001630}
1631
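/*
 * A buffer we are about to modify is acceptable only if it is still
 * linked into the tree and is not locked by someone else (being locked
 * is fine once it has been journal-prepared).
 */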
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001632static int locked_or_not_in_tree(struct tree_balance *tb,
1633 struct buffer_head *bh, char *which)
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001634{
1635 if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
1636 !B_IS_IN_TREE(bh)) {
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001637 reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001638 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 }
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001640 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641}
1642
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001643static int check_before_balancing(struct tree_balance *tb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001645 int retval = 0;
1646
Frederic Weisbecker08f14fc2009-05-16 19:10:38 +02001647 if (REISERFS_SB(tb->tb_sb)->cur_tb) {
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001648 reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
1649 "occurred based on cur_tb not being null at "
1650 "this point in code. do_balance cannot properly "
Frederic Weisbecker08f14fc2009-05-16 19:10:38 +02001651 "handle concurrent tree accesses on a same "
1652 "mount point.");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001653 }
1654
Jeff Mahoney098297b2014-04-23 10:00:36 -04001655 /*
1656 * double check that buffers that we will modify are unlocked.
1657 * (fix_nodes should already have prepped all of these for us).
1658 */
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001659 if (tb->lnum[0]) {
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001660 retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
1661 retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
1662 retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001663 check_leaf(tb->L[0]);
1664 }
1665 if (tb->rnum[0]) {
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001666 retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
1667 retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
1668 retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001669 check_leaf(tb->R[0]);
1670 }
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001671 retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
1672 "S[0]");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001673 check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
1674
1675 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676}
1677
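/*
 * After the leaf level has been balanced, the free space recorded in
 * each touched leaf must match what its parent believes:
 *
 *	B_FREE_SPACE(leaf) == MAX_CHILD_SIZE(leaf) - dc_size(parent slot)
 *
 * Any mismatch means a shift was accounted incorrectly.
 */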
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001678static void check_after_balance_leaf(struct tree_balance *tb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001680 if (tb->lnum[0]) {
1681 if (B_FREE_SPACE(tb->L[0]) !=
1682 MAX_CHILD_SIZE(tb->L[0]) -
1683 dc_size(B_N_CHILD
1684 (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
1685 print_cur_tb("12221");
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001686 reiserfs_panic(tb->tb_sb, "PAP-12355",
1687 "shift to left was incorrect");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001688 }
1689 }
1690 if (tb->rnum[0]) {
1691 if (B_FREE_SPACE(tb->R[0]) !=
1692 MAX_CHILD_SIZE(tb->R[0]) -
1693 dc_size(B_N_CHILD
1694 (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
1695 print_cur_tb("12222");
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001696 reiserfs_panic(tb->tb_sb, "PAP-12360",
1697 "shift to right was incorrect");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001698 }
1699 }
1700 if (PATH_H_PBUFFER(tb->tb_path, 1) &&
1701 (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
1702 (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
1703 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
1704 PATH_H_POSITION(tb->tb_path, 1)))))) {
1705 int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
1706 int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
1707 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
1708 PATH_H_POSITION(tb->tb_path,
1709 1))));
1710 print_cur_tb("12223");
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001711 reiserfs_warning(tb->tb_sb, "reiserfs-12363",
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001712 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
1713 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
1714 left,
1715 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
1716 PATH_H_PBUFFER(tb->tb_path, 1),
1717 PATH_H_POSITION(tb->tb_path, 1),
1718 dc_size(B_N_CHILD
1719 (PATH_H_PBUFFER(tb->tb_path, 1),
1720 PATH_H_POSITION(tb->tb_path, 1))),
1721 right);
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001722 reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001723 }
1724}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001726static void check_leaf_level(struct tree_balance *tb)
1727{
1728 check_leaf(tb->L[0]);
1729 check_leaf(tb->R[0]);
1730 check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
1731}
1732
1733static void check_internal_levels(struct tree_balance *tb)
1734{
1735 int h;
1736
1737 /* check all internal nodes */
1738 for (h = 1; tb->insert_size[h]; h++) {
1739 check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
1740 "BAD BUFFER ON PATH");
1741 if (tb->lnum[h])
1742 check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
1743 if (tb->rnum[h])
1744 check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
1745 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747}
1748
1749#endif
1750
Jeff Mahoney098297b2014-04-23 10:00:36 -04001751/*
1752 * Now we have all of the buffers that must be used in balancing of
1753 * the tree. We rely on the assumption that schedule() will not occur
1754 * while do_balance works. (Only interrupt handlers are acceptable.)
1755 * We balance the tree according to the analysis made before this,
1756 * using buffers already obtained. For SMP support it will someday be
1757 * necessary to add ordered locking of tb.
1758 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
Jeff Mahoney098297b2014-04-23 10:00:36 -04001760/*
1761 * Some interesting rules of balancing:
1762 * we delete a maximum of two nodes per level per balancing: we never
1763 * delete R, when we delete two of three nodes L, S, R then we move
1764 * them into R.
1765 *
1766 * we only delete L if we are deleting two nodes, if we delete only
1767 * one node we delete S
1768 *
1769 * if we shift leaves then we shift as much as we can: this is a
1770 * deliberate policy of extremism in node packing which results in
1771 * higher average utilization after repeated random balance operations
1772 * at the cost of more memory copies and more balancing as a result of
1773 * small insertions to full nodes.
1774 *
1775 * if we shift internal nodes we try to evenly balance the node
1776 * utilization, with consequent less balancing at the cost of lower
1777 * utilization.
1778 *
1779 * one could argue that the policy for directories in leaves should be
1780 * that of internal nodes, but we will wait until another day to
1781 * evaluate this.... It would be nice to someday measure and prove
1782 * these assumptions as to what is optimal....
1783 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001785static inline void do_balance_starts(struct tree_balance *tb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786{
Jeff Mahoney098297b2014-04-23 10:00:36 -04001787 /* use print_cur_tb() to see initial state of struct tree_balance */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001789 /* store_print_tb (tb); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001791 /* do not delete, just comment it out */
Jeff Mahoney098297b2014-04-23 10:00:36 -04001792 /*
1793 print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
1794 tb->tb_path->pos_in_item, tb, "check");
1795 */
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001796 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797#ifdef CONFIG_REISERFS_CHECK
Frederic Weisbecker08f14fc2009-05-16 19:10:38 +02001798 REISERFS_SB(tb->tb_sb)->cur_tb = tb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799#endif
1800}
1801
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001802static inline void do_balance_completed(struct tree_balance *tb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805#ifdef CONFIG_REISERFS_CHECK
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001806 check_leaf_level(tb);
1807 check_internal_levels(tb);
Frederic Weisbecker08f14fc2009-05-16 19:10:38 +02001808 REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809#endif
1810
Jeff Mahoney098297b2014-04-23 10:00:36 -04001811 /*
1812 * reiserfs_free_block is no longer schedule safe. So, we need to
1813 * put the buffers we want freed on the thrown list during do_balance,
1814 * and then free them now
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001815 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001817 REISERFS_SB(tb->tb_sb)->s_do_balance++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001819 /* release all nodes held to perform the balancing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 unfix_nodes(tb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001822 free_thrown(tb);
1823}
1824
Jeff Mahoney098297b2014-04-23 10:00:36 -04001825/*
1826 * do_balance - balance the tree
1827 *
1828 * @tb: tree_balance structure
1829 * @ih: item header of inserted item
1830 * @body: body of inserted item or bytes to paste
1831 * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' - paste
1832 *
1833 * Cut means delete part of an item (includes removing an entry from a
1834 * directory).
1835 *
1836 * Delete means delete whole item.
1837 *
1838 * Insert means add a new item into the tree.
1839 *
1840 * Paste means to append to the end of an existing file or to
1841 * insert a directory entry.
1842 */
1843void do_balance(struct tree_balance *tb, struct item_head *ih,
1844 const char *body, int flag)
1845{
1846 int child_pos; /* position of a child node in its parent */
1847 int h; /* level of the tree being processed */
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001848
Jeff Mahoney098297b2014-04-23 10:00:36 -04001849 /*
1850 * in our processing of one level we sometimes determine what
1851 * must be inserted into the next higher level. This insertion
1852 * consists of a key or two keys and their corresponding
1853 * pointers
1854 */
1855 struct item_head insert_key[2];
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001856
Jeff Mahoney098297b2014-04-23 10:00:36 -04001857 /* inserted node-ptrs for the next level */
1858 struct buffer_head *insert_ptr[2];
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001859
1860 tb->tb_mode = flag;
1861 tb->need_balance_dirty = 0;
1862
1863 if (FILESYSTEM_CHANGED_TB(tb)) {
Jeff Mahoneyc3a9c212009-03-30 14:02:25 -04001864 reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
1865 "changed");
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001866 }
1867 /* if we have no real work to do */
1868 if (!tb->insert_size[0]) {
Jeff Mahoney45b03d52009-03-30 14:02:21 -04001869 reiserfs_warning(tb->tb_sb, "PAP-12350",
1870 "insert_size == 0, mode == %c", flag);
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001871 unfix_nodes(tb);
1872 return;
1873 }
1874
Jeff Mahoneya228bf82014-04-23 10:00:42 -04001875 atomic_inc(&fs_generation(tb->tb_sb));
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001876 do_balance_starts(tb);
1877
Jeff Mahoney098297b2014-04-23 10:00:36 -04001878 /*
1879 * balance_leaf returns 0 except when combining L, R and S into
1880 * one node. See balance_internal() for an explanation of this
1881 * line of code.
1882 */
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001883 child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
1884 balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885
1886#ifdef CONFIG_REISERFS_CHECK
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001887 check_after_balance_leaf(tb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888#endif
1889
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001890 /* Balance internal level of the tree. */
1891 for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
Jeff Mahoney83a3a562014-04-23 10:01:02 -04001892 child_pos = balance_internal(tb, h, child_pos, insert_key,
1893 insert_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
Linus Torvaldsbd4c6252005-07-12 20:21:28 -07001895 do_balance_completed(tb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896}
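/*
 * Typical call sequence, as a simplified sketch of what callers such as
 * reiserfs_insert_item() in stree.c do (error handling and the
 * REPEAT_SEARCH retry loop omitted):
 *
 *	retval = fix_nodes(M_INSERT, &s_ins_balance, ih, body);
 *	if (retval == CARRY_ON)
 *		do_balance(&s_ins_balance, ih, body, M_INSERT);
 *	else
 *		unfix_nodes(&s_ins_balance);
 */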