/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"

/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}

static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
			 unsigned index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}

/*----------------------------------------------------------------*/

/* Assumes that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}

int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 1);
}
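
/*
 * For illustration (example values assumed): given a node with keys
 * { 10, 20, 30 }, lower_bound() returns the index of the largest key <=
 * the search key and upper_bound() the index of the smallest key >= it:
 *
 *	lower_bound(n, 20) == 1		upper_bound(n, 20) == 1
 *	lower_bound(n, 25) == 1		upper_bound(n, 25) == 2
 *	lower_bound(n, 5)  == -1	upper_bound(n, 35) == 3 (nr_entries)
 *
 * Both can return an out-of-range index, which callers must check.
 */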

void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	unsigned i;
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		for (i = 0; i < nr_entries; i++)
			dm_tm_inc(tm, value64(n, i));
	else if (vt->inc)
		for (i = 0; i < nr_entries; i++)
			vt->inc(vt->context, value_ptr(n, i));
}

static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= le32_to_cpu(node->header.max_entries)) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}

/*----------------------------------------------------------------*/

/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3; /* rounds down */

	return 3 * n;
}
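
/*
 * A worked example (assuming a 32-byte struct node_header): with 4096-byte
 * blocks and 8-byte values, elt_size = 8 + 8 = 16, total = (4096 - 32) / 16
 * = 254, n = 254 / 3 = 84, so a node holds at most 3 * 84 = 252 entries.
 */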

int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	unlock_block(info, b);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);

/*----------------------------------------------------------------*/

/*
 * Deletion is conceptually recursive.  Since we have limited stack space,
 * we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned level;
	unsigned nr_children;
	unsigned current_child;
};

struct del_stack {
	struct dm_btree_info *info;
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};

static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}

static void prefetch_children(struct del_stack *s, struct frame *f)
{
	unsigned i;
	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

	for (i = 0; i < f->nr_children; i++)
		dm_bm_prefetch(bm, value64(f->n, i));
}

static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}

static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}

static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}

static void unlock_all_frames(struct del_stack *s)
{
	struct frame *f;

	while (unprocessed_frames(s)) {
		f = s->spine + s->top--;
		dm_tm_unlock(s->tm, f->b);
	}
}

int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	s = kmalloc(sizeof(*s), GFP_NOIO);
	if (!s)
		return -ENOMEM;
	s->info = info;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec) {
				unsigned i;

				for (i = 0; i < f->nr_children; i++)
					info->value_type.dec(info->value_type.context,
							     value_ptr(f->n, i));
			}
			pop_frame(s);
		}
	}
out:
	if (r) {
		/* cleanup all frames of del_stack */
		unlock_all_frames(s);
	}
	kfree(s);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
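
/*
 * Usage sketch (hypothetical caller): dropping an entire tree,
 *
 *	r = dm_btree_del(info, root);
 *
 * decrements the reference count of every unshared block; subtrees that
 * are still shared (ref count > 1) just have their count decremented and
 * their children left untouched.
 */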

/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}

int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
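
/*
 * A minimal usage sketch (hypothetical caller) for a single-level tree
 * storing __le64 values:
 *
 *	uint64_t key = 42;
 *	__le64 value_le;
 *
 *	r = dm_btree_lookup(info, root, &key, &value_le);
 *	if (!r)
 *		use(le64_to_cpu(value_le));	(use() is hypothetical)
 *
 * With info->levels == N, keys must point to an array of N keys.
 */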

static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
				       uint64_t key, uint64_t *rkey, void *value_le)
{
	int r, i;
	uint32_t flags, nr_entries;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, root, &node);
	if (r)
		return r;

	n = dm_block_data(node);
	flags = le32_to_cpu(n->header.flags);
	nr_entries = le32_to_cpu(n->header.nr_entries);

	if (flags & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		if (r == -ENODATA && i < (nr_entries - 1)) {
			i++;
			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		}

	} else {
		i = upper_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		*rkey = le64_to_cpu(n->keys[i]);
		memcpy(value_le, value_ptr(n, i), info->value_type.size);
	}
out:
	dm_tm_unlock(info->tm, node);
	return r;
}

int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *keys, uint64_t *rkey, void *value_le)
{
	unsigned level;
	int r = -ENODATA;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels - 1u; level++) {
		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, rkey,
				     &internal_value_le, sizeof(uint64_t));
		if (r)
			goto out;

		if (*rkey != keys[level]) {
			r = -ENODATA;
			goto out;
		}

		root = le64_to_cpu(internal_value_le);
	}

	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
	exit_ro_spine(&spine);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
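
/*
 * For illustration (example values assumed): with leaf keys { 10, 20, 30 },
 * dm_btree_lookup_next() for key 15 sets *rkey to 20 and copies that
 * entry's value; i.e. it returns the first entry whose key is >= the
 * requested key.
 */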

/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *	  +--------+
 *	  | Parent |
 *	  +--------+
 *	      |
 *	      v
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 *
 * After:
 *	       +--------+
 *	       | Parent |
 *	       +--------+
 *	         |    |
 *	         v    +------+
 *	  +---------+        |
 *	  | A* +++  |        v
 *	  +---------+    +-------+
 *	                 | B +++ |
 *	                 +-------+
 *
 * Where A* is a shadow of A.
 */
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
			       uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		unlock_block(s->info, right);
		return r;
	}

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}

/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 *
 * After:
 *	+------------+
 *	| A (shadow) |
 *	+------------+
 *	    |      |
 *	+---+      +---+
 *	|              |
 *	v              v
 *	+-------+  +-------+
 *	| B +++ |  | C +++ |
 *	+-------+  +-------+
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	r = new_block(s->info, &right);
	if (r < 0) {
		unlock_block(s->info, left);
		return r;
	}

	pn = dm_block_data(new_parent);
	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;

	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));

	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	/*
	 * Rejig the spine.  This is ugly, since it knows too
	 * much about the spine.
	 */
	if (s->nodes[0] != new_parent) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = new_parent;
	}
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}
	s->count = 2;

	return 0;
}

static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause is unnecessary */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}

static bool need_insert(struct btree_node *node, uint64_t *keys,
			unsigned level, unsigned index)
{
	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
		(le64_to_cpu(node->keys[index]) != keys[level]));
}

static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
	__dm_written_to_disk(value)
{
	int r;
	unsigned level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	init_le64_type(info->tm, &le64_type);
	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));

		if (need_insert(n, keys, level, index)) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));

	if (need_insert(n, keys, level, index)) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}

int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);

int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
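
/*
 * Usage sketch (hypothetical caller): values handed to the insert
 * functions must be little-endian data blessed for disk, e.g.
 *
 *	uint64_t key = 42;
 *	__le64 value_le = cpu_to_le64(1234);
 *	dm_block_t new_root;
 *
 *	__dm_bless_for_disk(&value_le);
 *	r = dm_btree_insert(info, root, &key, &value_le, &new_root);
 *
 * The btree is copy-on-write, so on success the caller must carry on
 * with new_root rather than root.
 */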

/*----------------------------------------------------------------*/

static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
		    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;
		else
			i--;

		if (find_highest)
			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		else
			*result_key = le64_to_cpu(ro_node(s)->keys[0]);

		if (next_block || flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}

static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
			     bool find_highest, uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_key(&spine, root, find_highest, result_keys + level,
			     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}

int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
			     uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);

/*----------------------------------------------------------------*/

/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also, this only works for single-level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	struct dm_block *node;
	struct btree_node *n;
	uint64_t keys;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}

int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	BUG_ON(info->levels > 1);
	return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
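
/*
 * A walk usage sketch (hypothetical caller): counting the entries of a
 * single-level tree.
 *
 *	static int count_fn(void *context, uint64_t *keys, void *leaf)
 *	{
 *		unsigned *count = context;
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned count = 0;
 *	r = dm_btree_walk(info, root, count_fn, &count);
 *
 * A non-zero return from the callback aborts the walk and is propagated
 * as dm_btree_walk()'s result.
 */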