/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BTREE_INTERNAL_H
#define DM_BTREE_INTERNAL_H

#include "dm-btree.h"

/*----------------------------------------------------------------*/

/*
 * We'll need two accessor functions for n->csum and n->blocknr
 * to support dm-btree-spine.c.
 */

enum node_flags {
	INTERNAL_NODE = 1,
	LEAF_NODE = 1 << 1
};

/*
 * Every btree node begins with this structure.  Make sure it's a multiple
 * of 8 bytes in size, otherwise the 64-bit keys will be misaligned.
 */
struct node_header {
	__le32 csum;
	__le32 flags;
	__le64 blocknr; /* Block this node is supposed to live in. */

	__le32 nr_entries;
	__le32 max_entries;
	__le32 value_size;
	__le32 padding;
} __packed;
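
/*
 * Illustrative sketch of the alignment requirement above: the fields
 * sum to 4 + 4 + 8 + 4 + 4 + 4 + 4 = 32 bytes, a multiple of 8, so the
 * keys in struct btree_node below stay 64-bit aligned.  This assumes
 * BUILD_BUG_ON() is visible via the headers already included; a check
 * like this would typically be invoked from an init path in one of the
 * .c files.
 */
static inline void node_header_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct node_header) % 8);
}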

struct btree_node {
	struct node_header header;
	__le64 keys[];
} __packed;
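
/*
 * On disk a node is laid out as the header, then keys[max_entries],
 * followed by a packed array of max_entries values, each value_size
 * bytes wide (see value_base() and value_ptr() below).
 */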

/*
 * Locks a block using the btree node validator.
 */
int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
		 struct dm_block **result);

void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt);

int new_block(struct dm_btree_info *info, struct dm_block **result);
int unlock_block(struct dm_btree_info *info, struct dm_block *b);

/*
 * Spines keep track of the rolling locks.  There are two variants: a
 * read-only one and one that uses shadowing.  These are separate structs
 * to allow the type checker to spot misuse, for example accidentally
 * calling read_lock on a shadow spine.  (A read-only walk is sketched
 * at the end of this header.)
 */
struct ro_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];
};

void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);

struct shadow_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];

	dm_block_t root;
};

void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s);

int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt);

/*
 * The spine must have at least one entry before calling this.
 */
struct dm_block *shadow_current(struct shadow_spine *s);

/*
 * The spine must have at least two entries before calling this.
 */
struct dm_block *shadow_parent(struct shadow_spine *s);

int shadow_has_parent(struct shadow_spine *s);

int shadow_root(struct shadow_spine *s);
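
/*
 * Illustrative sketch of how a shadow spine hands back a possibly
 * relocated root, assuming the first shadow_step() copies-on-write a
 * shared root and the spine records its new location for shadow_root()
 * to return.  The descent and node edits are elided; this is not a
 * complete insert path.
 */
static inline int example_shadow_root(struct dm_btree_info *info,
				      dm_block_t root,
				      struct dm_btree_value_type *vt)
{
	struct shadow_spine spine;
	int r;

	init_shadow_spine(&spine, info);

	r = shadow_step(&spine, root, vt);	/* shadows the root block */
	if (r < 0) {
		exit_shadow_spine(&spine);
		return r;
	}

	/* ... further shadow_step() calls and node edits would go here ... */

	r = shadow_root(&spine);	/* block number of the new root */
	exit_shadow_spine(&spine);	/* drops the rolling locks */
	return r;
}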

/*
 * Some inlines.
 */
static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
	return n->keys + index;
}

static inline void *value_base(struct btree_node *n)
{
	return &n->keys[le32_to_cpu(n->header.max_entries)];
}

static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	return value_base(n) + (value_size * index);
}

/*
 * Assumes the values are suitably aligned and converts to core format.
 */
static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
	__le64 *values_le = value_base(n);

	return le64_to_cpu(values_le[index]);
}

/*
 * Searching for a key within a single node.
 */
int lower_bound(struct btree_node *n, uint64_t key);

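/*
 * Illustrative sketch of the rolling-lock walk the read-only spine
 * supports: descend from the root towards a leaf while the spine holds
 * at most two read locks, releasing the grandparent as each child is
 * locked.  Assumes the usual kernel error conventions (-ENODATA when
 * the key precedes every entry, as in dm-btree.c's lookup); real code
 * would live in a .c file rather than this header.
 */
static inline int example_ro_walk(struct dm_btree_info *info,
				  dm_block_t root, uint64_t key)
{
	struct ro_spine spine;
	struct btree_node *n;
	int r, i;

	init_ro_spine(&spine, info);

	r = ro_step(&spine, root);
	if (r < 0)
		goto out;

	n = ro_node(&spine);
	while (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0) {
			r = -ENODATA;
			goto out;
		}

		/* In an internal node each value is a child block number. */
		r = ro_step(&spine, value64(n, i));
		if (r < 0)
			goto out;

		n = ro_node(&spine);
	}

	/* n now points at the leaf that may contain 'key'. */
out:
	exit_ro_spine(&spine);	/* drops any locks still held */
	return r;
}
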
extern struct dm_block_validator btree_node_validator;

#endif /* DM_BTREE_INTERNAL_H */