#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * THE BTREE:
 *
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node,
 * and the key is the highest key in the child node - except that the highest
 * key in an interior node is always MAX_KEY. The size field refers to the
 * size on disk of the child node - this would allow us to have variable sized
 * btree nodes (handy for keeping the depth of the btree at 1 by expanding
 * just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first
 * thing we do is resort all the sets of keys with a mergesort, and in the
 * same pass we check for overlapping extents and adjust them appropriately.
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded closure is used for
 * waiting on IO or on the memory reserve.
 *
 * BTREE CACHE:
 *
 * Btree nodes are cached in memory; traversing the btree might require
 * reading in btree nodes, which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in
 * from disk if necessary. This function is almost never called directly
 * though - the btree() macro is used to get a btree node, call some function
 * on it, and unlock the node after the function returns.
 *
 * The root is special cased - it's taken out of the cache's lru (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node
 * it points to - the btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree
 * nodes in order to make forward progress. To do this we use the btree cache
 * itself as a reserve; if __get_free_pages() fails, we'll find a node in the
 * btree cache we can reuse. We can't allow more than one thread to be doing
 * this at a time, so there's a lock, implemented by a pointer to the btree_op
 * closure - this allows the btree_root() macro to implicitly release this
 * lock.
 *
 * BTREE IO:
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get()
 * handles this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing splits into two cases. Merely marking a node dirty (setting
 * BTREE_NODE_dirty and scheduling the node's delayed work) ensures that the
 * dirty keys will be written at some point in the future.
 *
 * bch_btree_node_write() causes a write to happen "immediately" (if there was
 * already a write in flight, it'll cause the write to happen as soon as the
 * previous write completes). It returns immediately though - it takes a
 * refcount on the closure you pass to it, so a closure_sync() later can be
 * used to wait for the write to complete.
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
 *
 * LOCKING:
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
 *
 * If, after traversing the btree, the insertion code discovers it has to
 * split, it must restart from the root and take new locks - to do this it
 * changes the lock field and returns -EINTR, which causes the btree_root()
 * macro to loop.
 *
 * Handling cache misses requires a different mechanism for upgrading to a
 * write lock. We do cache lookups with only a read lock held, but if we get a
 * cache miss and we wish to insert this data into the cache, we have to
 * insert a placeholder key to detect races - otherwise, we could race with a
 * write and overwrite the data that was just written to the cache with stale
 * data from the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment -
 * to insert the check key we unlock the btree node, then take a write lock,
 * and fail if the sequence number doesn't match.
 */

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/* If btree_split() frees a btree node, it writes a new pointer to that
	 * btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release the
	 * refcount that btree_split() took.
	 */
	int			prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long		accessed;
	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;
	struct btree		*parent;

	struct mutex		write_lock;

	unsigned long		flags;
	uint16_t		written;	/* would be nice to kill */
	uint8_t			level;

	struct btree_keys	keys;

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure		io;
	struct semaphore	io_mutex;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];
	struct bio		*bio;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);

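/*
 * For illustration: BTREE_FLAG(dirty) above generates btree_node_dirty() and
 * set_btree_node_dirty(), operating on BTREE_NODE_dirty in b->flags. A
 * minimal (hypothetical) use:
 */
static inline void example_mark_node_dirty(struct btree *b)
{
	if (!btree_node_dirty(b))		/* test_bit() on BTREE_NODE_dirty */
		set_btree_node_dirty(b);	/* set_bit(), atomically */
}
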
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

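/*
 * A sketch of the toggle described in the BTREE IO comment above: flipping
 * BTREE_NODE_write_idx (as the write path in btree.c does when it starts a
 * write) swaps which btree_write is being set up and which is the write in
 * flight. The helper name is hypothetical.
 */
static inline struct btree_write *example_toggle_write(struct btree *b)
{
	/* the old current write becomes btree_prev_write(b) */
	change_bit(BTREE_NODE_write_idx, &b->flags);
	return btree_current_write(b);
}
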
static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->keys.set->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	/* Rearm GC to trigger after 1/16th of the cache has been written */
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}

void bkey_put(struct cache_set *c, struct bkey *k);

/* Looping macros */

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

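/*
 * Usage sketch for for_each_cached_btree(): walk every cached node in the
 * hash table. The chains are traversed with hlist_for_each_entry_rcu(), so
 * the caller must be in an RCU read-side section. Hypothetical helper:
 */
static inline unsigned example_count_dirty_nodes(struct cache_set *c)
{
	struct btree *b;
	unsigned i, dirty = 0;

	rcu_read_lock();
	for_each_cached_btree(b, c, i)
		if (btree_node_dirty(b))
			dirty++;
	rcu_read_unlock();

	return dirty;
}
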
/* Recursing down the btree */

struct btree_op {
	/* for waiting on btree reserve in btree_split() */
	wait_queue_t		wait;

	/* Btree level at which we start taking write locks */
	short			lock;

	unsigned		insert_collision:1;
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}

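/*
 * Usage sketch (hypothetical helpers): per the LOCKING comment, an insert
 * that only touches a leaf initializes its op with write_lock_level 0, while
 * a read-only traversal passes -1 so no write locks are taken.
 */
static inline void example_init_insert_op(struct btree_op *op)
{
	bch_btree_op_init(op, 0);	/* write locks at level <= 0: leaves */
}

static inline void example_init_read_op(struct btree_op *op)
{
	bch_btree_op_init(op, -1);	/* read locks only */
}
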
static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}

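/*
 * Sketch of the sequence number check from the LOCKING comment: record
 * b->seq while holding the read lock, drop it, retake as a write lock, and
 * verify that only our own rw_lock() bumped the count (a write lock
 * increments b->seq exactly once). This mirrors the check-key insertion
 * path; the helper itself is hypothetical.
 */
static inline bool example_upgrade_to_write(struct btree *b, unsigned long seq)
{
	rw_unlock(false, b);
	rw_lock(true, b, b->level);

	if (b->seq != seq + 1) {
		/* lost a race with another writer - caller must retry */
		rw_unlock(true, b);
		return false;
	}

	return true;
}
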
void bch_btree_node_read_done(struct btree *);
void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_btree_set_root(struct btree *);
struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
				     int, bool, struct btree *);
struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
				 struct bkey *, int, bool, struct btree *);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
		     atomic_t *, struct bkey *);

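/*
 * Sketch of a plain insertion via bch_btree_insert(): build a keylist with
 * the helpers from bset.h and pass NULL for both the journal ref and the
 * replace key (i.e. a non-replacing insert). Hypothetical helper, assuming
 * the key fits in the keylist's inline buffer.
 */
static inline int example_insert_one_key(struct cache_set *c, struct bkey *k)
{
	struct keylist keys;

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, k);	/* copies *k into the list */

	return bch_btree_insert(c, &keys, NULL, NULL);
}
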
int bch_gc_thread_start(struct cache_set *);
void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
void bch_initial_mark_key(struct cache_set *, int, struct bkey *);

static inline void wake_up_gc(struct cache_set *c)
{
	wake_up(&c->gc_wait);
}

#define MAP_DONE	0
#define MAP_CONTINUE	1

#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
			  struct bkey *, btree_map_nodes_fn *, int);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

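/*
 * Sketch of a btree_map_nodes_fn: the callback runs once per node and
 * returns MAP_CONTINUE to keep walking or MAP_DONE to stop; starting the
 * walk from &ZERO_KEY (the all-zeroes bkey) visits every node. Both helpers
 * are hypothetical.
 */
static inline int example_visit_node(struct btree_op *op, struct btree *b)
{
	return b->level ? MAP_CONTINUE : MAP_DONE;	/* stop at first leaf */
}

static inline int example_walk_nodes(struct btree_op *op, struct cache_set *c)
{
	return bch_btree_map_nodes(op, c, &ZERO_KEY, example_visit_node);
}
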
typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
				struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
		       struct bkey *, btree_map_keys_fn *, int);

typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *,
		       struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);

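/*
 * Sketch of a keybuf_pred_fn: bch_refill_keybuf() calls the predicate on
 * each candidate key and only buffers those it accepts. A dirty-key filter,
 * roughly like the one the writeback code uses (hypothetical name):
 */
static inline bool example_dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);	/* buffer only keys pointing at dirty data */
}
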
#endif