blob: 101231f0f3992bb9a62a5a1582188fe9031cfeb8 [file] [log] [blame]
Kent Overstreetcafe5632013-03-23 16:11:31 -07001/*
2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3 *
4 * Uses a block device as cache for other block devices; optimized for SSDs.
5 * All allocation is done in buckets, which should match the erase block size
6 * of the device.
7 *
8 * Buckets containing cached data are kept on a heap sorted by priority;
9 * bucket priority is increased on cache hit, and periodically all the buckets
10 * on the heap have their priority scaled down. This currently is just used as
11 * an LRU but in the future should allow for more intelligent heuristics.
12 *
13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
14 * counter. Garbage collection is used to remove stale pointers.
15 *
16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
17 * as keys are inserted we only sort the pages that have not yet been written.
18 * When garbage collection is run, we resort the entire node.
19 *
20 * All configuration is done via sysfs; see Documentation/bcache.txt.
21 */
22
23#include "bcache.h"
24#include "btree.h"
25#include "debug.h"
Kent Overstreet279afba2013-06-05 06:21:07 -070026#include "writeback.h"
Kent Overstreetcafe5632013-03-23 16:11:31 -070027
28#include <linux/slab.h>
29#include <linux/bitops.h>
Kent Overstreet72a44512013-10-24 17:19:26 -070030#include <linux/freezer.h>
Kent Overstreetcafe5632013-03-23 16:11:31 -070031#include <linux/hash.h>
Kent Overstreet72a44512013-10-24 17:19:26 -070032#include <linux/kthread.h>
Geert Uytterhoevencd953ed2013-03-27 18:56:28 +010033#include <linux/prefetch.h>
Kent Overstreetcafe5632013-03-23 16:11:31 -070034#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <trace/events/bcache.h>
37
38/*
39 * Todo:
40 * register_bcache: Return errors out to userspace correctly
41 *
42 * Writeback: don't undirty key until after a cache flush
43 *
44 * Create an iterator for key pointers
45 *
46 * On btree write error, mark bucket such that it won't be freed from the cache
47 *
48 * Journalling:
49 * Check for bad keys in replay
50 * Propagate barriers
51 * Refcount journal entries in journal_replay
52 *
53 * Garbage collection:
54 * Finish incremental gc
55 * Gc should free old UUIDs, data for invalid UUIDs
56 *
57 * Provide a way to list backing device UUIDs we have data cached for, and
58 * probably how long it's been since we've seen them, and a way to invalidate
59 * dirty data for devices that will never be attached again
60 *
61 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
62 * that based on that and how much dirty data we have we can keep writeback
63 * from being starved
64 *
65 * Add a tracepoint or somesuch to watch for writeback starvation
66 *
67 * When btree depth > 1 and splitting an interior node, we have to make sure
68 * alloc_bucket() cannot fail. This should be true but is not completely
69 * obvious.
70 *
71 * Make sure all allocations get charged to the root cgroup
72 *
73 * Plugging?
74 *
75 * If data write is less than hard sector size of ssd, round up offset in open
76 * bucket to the next whole sector
77 *
78 * Also lookup by cgroup in get_open_bucket()
79 *
80 * Superblock needs to be fleshed out for multiple cache devices
81 *
82 * Add a sysfs tunable for the number of writeback IOs in flight
83 *
84 * Add a sysfs tunable for the number of open data buckets
85 *
86 * IO tracking: Can we track when one process is doing io on behalf of another?
87 * IO tracking: Don't use just an average, weigh more recent stuff higher
88 *
89 * Test module load/unload
90 */
91
Kent Overstreetdf8e8972013-07-24 17:37:59 -070092enum {
93 BTREE_INSERT_STATUS_INSERT,
94 BTREE_INSERT_STATUS_BACK_MERGE,
95 BTREE_INSERT_STATUS_OVERWROTE,
96 BTREE_INSERT_STATUS_FRONT_MERGE,
97};
98
Kent Overstreetcafe5632013-03-23 16:11:31 -070099#define MAX_NEED_GC 64
100#define MAX_SAVE_PRIO 72
101
102#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
103
104#define PTR_HASH(c, k) \
105 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
106
Kent Overstreetcafe5632013-03-23 16:11:31 -0700107static struct workqueue_struct *btree_io_wq;
108
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700109static inline bool should_split(struct btree *b)
110{
111 struct bset *i = write_block(b);
112 return b->written >= btree_blocks(b) ||
113 (b->written + __set_blocks(i, i->keys + 15, b->c)
114 > btree_blocks(b));
115}
116
117#define insert_lock(s, b) ((b)->level <= (s)->lock)
118
119/*
120 * These macros are for recursing down the btree - they handle the details of
121 * locking and looking up nodes in the cache for you. They're best treated as
122 * mere syntax when reading code that uses them.
123 *
124 * op->lock determines whether we take a read or a write lock at a given depth.
125 * If you've got a read lock and find that you need a write lock (i.e. you're
126 * going to have to split), set op->lock and return -EINTR; btree_root() will
127 * call you again and you'll have the correct lock.
128 */
129
130/**
131 * btree - recurse down the btree on a specified key
132 * @fn: function to call, which will be passed the child node
133 * @key: key to recurse on
134 * @b: parent btree node
135 * @op: pointer to struct btree_op
136 */
137#define btree(fn, key, b, op, ...) \
138({ \
139 int _r, l = (b)->level - 1; \
140 bool _w = l <= (op)->lock; \
141 struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
142 if (!IS_ERR(_child)) { \
143 _child->parent = (b); \
144 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
145 rw_unlock(_w, _child); \
146 } else \
147 _r = PTR_ERR(_child); \
148 _r; \
149})
150
151/**
152 * btree_root - call a function on the root of the btree
153 * @fn: function to call, which will be passed the child node
154 * @c: cache set
155 * @op: pointer to struct btree_op
156 */
157#define btree_root(fn, c, op, ...) \
158({ \
159 int _r = -EINTR; \
160 do { \
161 struct btree *_b = (c)->root; \
162 bool _w = insert_lock(op, _b); \
163 rw_lock(_w, _b, _b->level); \
164 if (_b == (c)->root && \
165 _w == insert_lock(op, _b)) { \
166 _b->parent = NULL; \
167 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
168 } \
169 rw_unlock(_w, _b); \
170 bch_cannibalize_unlock(c); \
171 if (_r == -ENOSPC) { \
172 wait_event((c)->try_wait, \
173 !(c)->try_harder); \
174 _r = -EINTR; \
175 } \
176 } while (_r == -EINTR); \
177 \
178 _r; \
179})
180
Kent Overstreetcafe5632013-03-23 16:11:31 -0700181/* Btree key manipulation */
182
Kent Overstreet3a3b6a42013-07-24 16:46:42 -0700183void bkey_put(struct cache_set *c, struct bkey *k)
Kent Overstreete7c590e2013-09-10 18:39:16 -0700184{
185 unsigned i;
186
187 for (i = 0; i < KEY_PTRS(k); i++)
188 if (ptr_available(c, k, i))
189 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
190}
191
Kent Overstreetcafe5632013-03-23 16:11:31 -0700192/* Btree IO */
193
194static uint64_t btree_csum_set(struct btree *b, struct bset *i)
195{
196 uint64_t crc = b->key.ptr[0];
197 void *data = (void *) i + 8, *end = end(i);
198
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600199 crc = bch_crc64_update(crc, data, end - data);
Kent Overstreetc19ed232013-03-26 13:49:02 -0700200 return crc ^ 0xffffffffffffffffULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700201}
202
Kent Overstreetf3059a52013-05-15 17:13:45 -0700203static void bch_btree_node_read_done(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700204{
Kent Overstreetcafe5632013-03-23 16:11:31 -0700205 const char *err = "bad btree header";
Kent Overstreet57943512013-04-25 13:58:35 -0700206 struct bset *i = b->sets[0].data;
207 struct btree_iter *iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700208
Kent Overstreet57943512013-04-25 13:58:35 -0700209 iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
210 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700211 iter->used = 0;
212
Kent Overstreet280481d2013-10-24 16:36:03 -0700213#ifdef CONFIG_BCACHE_DEBUG
214 iter->b = b;
215#endif
216
Kent Overstreet57943512013-04-25 13:58:35 -0700217 if (!i->seq)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700218 goto err;
219
220 for (;
221 b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
222 i = write_block(b)) {
223 err = "unsupported bset version";
224 if (i->version > BCACHE_BSET_VERSION)
225 goto err;
226
227 err = "bad btree header";
228 if (b->written + set_blocks(i, b->c) > btree_blocks(b))
229 goto err;
230
231 err = "bad magic";
Kent Overstreet81ab4192013-10-31 15:46:42 -0700232 if (i->magic != bset_magic(&b->c->sb))
Kent Overstreetcafe5632013-03-23 16:11:31 -0700233 goto err;
234
235 err = "bad checksum";
236 switch (i->version) {
237 case 0:
238 if (i->csum != csum_set(i))
239 goto err;
240 break;
241 case BCACHE_BSET_VERSION:
242 if (i->csum != btree_csum_set(b, i))
243 goto err;
244 break;
245 }
246
247 err = "empty set";
248 if (i != b->sets[0].data && !i->keys)
249 goto err;
250
251 bch_btree_iter_push(iter, i->start, end(i));
252
253 b->written += set_blocks(i, b->c);
254 }
255
256 err = "corrupted btree";
257 for (i = write_block(b);
258 index(i, b) < btree_blocks(b);
259 i = ((void *) i) + block_bytes(b->c))
260 if (i->seq == b->sets[0].data->seq)
261 goto err;
262
263 bch_btree_sort_and_fix_extents(b, iter);
264
265 i = b->sets[0].data;
266 err = "short btree key";
267 if (b->sets[0].size &&
268 bkey_cmp(&b->key, &b->sets[0].end) < 0)
269 goto err;
270
271 if (b->written < btree_blocks(b))
272 bch_bset_init_next(b);
273out:
Kent Overstreet57943512013-04-25 13:58:35 -0700274 mempool_free(iter, b->c->fill_iter);
275 return;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700276err:
277 set_btree_node_io_error(b);
Kent Overstreet07e86cc2013-03-25 11:46:43 -0700278 bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
Kent Overstreetcafe5632013-03-23 16:11:31 -0700279 err, PTR_BUCKET_NR(b->c, &b->key, 0),
280 index(i, b), i->keys);
281 goto out;
282}
283
Kent Overstreet57943512013-04-25 13:58:35 -0700284static void btree_node_read_endio(struct bio *bio, int error)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700285{
Kent Overstreet57943512013-04-25 13:58:35 -0700286 struct closure *cl = bio->bi_private;
287 closure_put(cl);
288}
Kent Overstreetcafe5632013-03-23 16:11:31 -0700289
Kent Overstreet57943512013-04-25 13:58:35 -0700290void bch_btree_node_read(struct btree *b)
291{
292 uint64_t start_time = local_clock();
293 struct closure cl;
294 struct bio *bio;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700295
Kent Overstreetc37511b2013-04-26 15:39:55 -0700296 trace_bcache_btree_read(b);
297
Kent Overstreet57943512013-04-25 13:58:35 -0700298 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700299
Kent Overstreet57943512013-04-25 13:58:35 -0700300 bio = bch_bbio_alloc(b->c);
301 bio->bi_rw = REQ_META|READ_SYNC;
Kent Overstreet4f024f32013-10-11 15:44:27 -0700302 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
Kent Overstreet57943512013-04-25 13:58:35 -0700303 bio->bi_end_io = btree_node_read_endio;
304 bio->bi_private = &cl;
305
306 bch_bio_map(bio, b->sets[0].data);
307
Kent Overstreet57943512013-04-25 13:58:35 -0700308 bch_submit_bbio(bio, b->c, &b->key, 0);
309 closure_sync(&cl);
310
311 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
312 set_btree_node_io_error(b);
313
314 bch_bbio_free(bio, b->c);
315
316 if (btree_node_io_error(b))
317 goto err;
318
319 bch_btree_node_read_done(b);
Kent Overstreet57943512013-04-25 13:58:35 -0700320 bch_time_stats_update(&b->c->btree_read_time, start_time);
Kent Overstreet57943512013-04-25 13:58:35 -0700321
322 return;
323err:
Geert Uytterhoeven61cbd252013-09-23 23:17:30 -0700324 bch_cache_set_error(b->c, "io error reading bucket %zu",
Kent Overstreet57943512013-04-25 13:58:35 -0700325 PTR_BUCKET_NR(b->c, &b->key, 0));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700326}
327
328static void btree_complete_write(struct btree *b, struct btree_write *w)
329{
330 if (w->prio_blocked &&
331 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
Kent Overstreet119ba0f2013-04-24 19:01:12 -0700332 wake_up_allocators(b->c);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700333
334 if (w->journal) {
335 atomic_dec_bug(w->journal);
336 __closure_wake_up(&b->c->journal.wait);
337 }
338
Kent Overstreetcafe5632013-03-23 16:11:31 -0700339 w->prio_blocked = 0;
340 w->journal = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700341}
342
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800343static void btree_node_write_unlock(struct closure *cl)
344{
345 struct btree *b = container_of(cl, struct btree, io);
346
347 up(&b->io_mutex);
348}
349
Kent Overstreet57943512013-04-25 13:58:35 -0700350static void __btree_node_write_done(struct closure *cl)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700351{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800352 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700353 struct btree_write *w = btree_prev_write(b);
354
355 bch_bbio_free(b->bio, b->c);
356 b->bio = NULL;
357 btree_complete_write(b, w);
358
359 if (btree_node_dirty(b))
360 queue_delayed_work(btree_io_wq, &b->work,
361 msecs_to_jiffies(30000));
362
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800363 closure_return_with_destructor(cl, btree_node_write_unlock);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700364}
365
Kent Overstreet57943512013-04-25 13:58:35 -0700366static void btree_node_write_done(struct closure *cl)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700367{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800368 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700369 struct bio_vec *bv;
370 int n;
371
Kent Overstreet79886132013-11-23 17:19:00 -0800372 bio_for_each_segment_all(bv, b->bio, n)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700373 __free_page(bv->bv_page);
374
Kent Overstreet57943512013-04-25 13:58:35 -0700375 __btree_node_write_done(cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700376}
377
Kent Overstreet57943512013-04-25 13:58:35 -0700378static void btree_node_write_endio(struct bio *bio, int error)
379{
380 struct closure *cl = bio->bi_private;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800381 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreet57943512013-04-25 13:58:35 -0700382
383 if (error)
384 set_btree_node_io_error(b);
385
386 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
387 closure_put(cl);
388}
389
390static void do_btree_node_write(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700391{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800392 struct closure *cl = &b->io;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700393 struct bset *i = b->sets[b->nsets].data;
394 BKEY_PADDED(key) k;
395
396 i->version = BCACHE_BSET_VERSION;
397 i->csum = btree_csum_set(b, i);
398
Kent Overstreet57943512013-04-25 13:58:35 -0700399 BUG_ON(b->bio);
400 b->bio = bch_bbio_alloc(b->c);
401
402 b->bio->bi_end_io = btree_node_write_endio;
Kent Overstreetfaadf0c2013-11-01 18:03:08 -0700403 b->bio->bi_private = cl;
Kent Overstreete49c7c32013-06-26 17:25:38 -0700404 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
Kent Overstreet4f024f32013-10-11 15:44:27 -0700405 b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600406 bch_bio_map(b->bio, i);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700407
Kent Overstreete49c7c32013-06-26 17:25:38 -0700408 /*
409 * If we're appending to a leaf node, we don't technically need FUA -
410 * this write just needs to be persisted before the next journal write,
411 * which will be marked FLUSH|FUA.
412 *
413 * Similarly if we're writing a new btree root - the pointer is going to
414 * be in the next journal entry.
415 *
416 * But if we're writing a new btree node (that isn't a root) or
417 * appending to a non leaf btree node, we need either FUA or a flush
418 * when we write the parent with the new pointer. FUA is cheaper than a
419 * flush, and writes appending to leaf nodes aren't blocking anything so
420 * just make all btree node writes FUA to keep things sane.
421 */
422
Kent Overstreetcafe5632013-03-23 16:11:31 -0700423 bkey_copy(&k.key, &b->key);
424 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
425
Kent Overstreet8e51e412013-06-06 18:15:57 -0700426 if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700427 int j;
428 struct bio_vec *bv;
429 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
430
Kent Overstreet79886132013-11-23 17:19:00 -0800431 bio_for_each_segment_all(bv, b->bio, j)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700432 memcpy(page_address(bv->bv_page),
433 base + j * PAGE_SIZE, PAGE_SIZE);
434
Kent Overstreetcafe5632013-03-23 16:11:31 -0700435 bch_submit_bbio(b->bio, b->c, &k.key, 0);
436
Kent Overstreet57943512013-04-25 13:58:35 -0700437 continue_at(cl, btree_node_write_done, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700438 } else {
439 b->bio->bi_vcnt = 0;
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600440 bch_bio_map(b->bio, i);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700441
Kent Overstreetcafe5632013-03-23 16:11:31 -0700442 bch_submit_bbio(b->bio, b->c, &k.key, 0);
443
444 closure_sync(cl);
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800445 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700446 }
447}
448
Kent Overstreet57943512013-04-25 13:58:35 -0700449void bch_btree_node_write(struct btree *b, struct closure *parent)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700450{
451 struct bset *i = b->sets[b->nsets].data;
452
Kent Overstreetc37511b2013-04-26 15:39:55 -0700453 trace_bcache_btree_write(b);
454
Kent Overstreetcafe5632013-03-23 16:11:31 -0700455 BUG_ON(current->bio_list);
Kent Overstreet57943512013-04-25 13:58:35 -0700456 BUG_ON(b->written >= btree_blocks(b));
457 BUG_ON(b->written && !i->keys);
458 BUG_ON(b->sets->data->seq != i->seq);
Kent Overstreet280481d2013-10-24 16:36:03 -0700459 bch_check_keys(b, "writing");
Kent Overstreetcafe5632013-03-23 16:11:31 -0700460
Kent Overstreetcafe5632013-03-23 16:11:31 -0700461 cancel_delayed_work(&b->work);
462
Kent Overstreet57943512013-04-25 13:58:35 -0700463 /* If caller isn't waiting for write, parent refcount is cache set */
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800464 down(&b->io_mutex);
465 closure_init(&b->io, parent ?: &b->c->cl);
Kent Overstreet57943512013-04-25 13:58:35 -0700466
Kent Overstreetcafe5632013-03-23 16:11:31 -0700467 clear_bit(BTREE_NODE_dirty, &b->flags);
468 change_bit(BTREE_NODE_write_idx, &b->flags);
469
Kent Overstreet57943512013-04-25 13:58:35 -0700470 do_btree_node_write(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700471
Kent Overstreetcafe5632013-03-23 16:11:31 -0700472 b->written += set_blocks(i, b->c);
473 atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
474 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
475
476 bch_btree_sort_lazy(b);
477
478 if (b->written < btree_blocks(b))
479 bch_bset_init_next(b);
480}
481
Kent Overstreetf269af52013-07-23 20:48:29 -0700482static void bch_btree_node_write_sync(struct btree *b)
483{
484 struct closure cl;
485
486 closure_init_stack(&cl);
487 bch_btree_node_write(b, &cl);
488 closure_sync(&cl);
489}
490
Kent Overstreet57943512013-04-25 13:58:35 -0700491static void btree_node_write_work(struct work_struct *w)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700492{
493 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
494
Kent Overstreet57943512013-04-25 13:58:35 -0700495 rw_lock(true, b, b->level);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700496
497 if (btree_node_dirty(b))
Kent Overstreet57943512013-04-25 13:58:35 -0700498 bch_btree_node_write(b, NULL);
499 rw_unlock(true, b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700500}
501
Kent Overstreetc18536a2013-07-24 17:44:17 -0700502static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700503{
504 struct bset *i = b->sets[b->nsets].data;
505 struct btree_write *w = btree_current_write(b);
506
Kent Overstreet57943512013-04-25 13:58:35 -0700507 BUG_ON(!b->written);
508 BUG_ON(!i->keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700509
Kent Overstreet57943512013-04-25 13:58:35 -0700510 if (!btree_node_dirty(b))
511 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700512
Kent Overstreet57943512013-04-25 13:58:35 -0700513 set_btree_node_dirty(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700514
Kent Overstreetc18536a2013-07-24 17:44:17 -0700515 if (journal_ref) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700516 if (w->journal &&
Kent Overstreetc18536a2013-07-24 17:44:17 -0700517 journal_pin_cmp(b->c, w->journal, journal_ref)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700518 atomic_dec_bug(w->journal);
519 w->journal = NULL;
520 }
521
522 if (!w->journal) {
Kent Overstreetc18536a2013-07-24 17:44:17 -0700523 w->journal = journal_ref;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700524 atomic_inc(w->journal);
525 }
526 }
527
Kent Overstreetcafe5632013-03-23 16:11:31 -0700528 /* Force write if set is too big */
Kent Overstreet57943512013-04-25 13:58:35 -0700529 if (set_bytes(i) > PAGE_SIZE - 48 &&
530 !current->bio_list)
531 bch_btree_node_write(b, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700532}
533
534/*
535 * Btree in memory cache - allocation/freeing
536 * mca -> memory cache
537 */
538
539static void mca_reinit(struct btree *b)
540{
541 unsigned i;
542
543 b->flags = 0;
544 b->written = 0;
545 b->nsets = 0;
546
547 for (i = 0; i < MAX_BSETS; i++)
548 b->sets[i].size = 0;
549 /*
550 * Second loop starts at 1 because b->sets[0]->data is the memory we
551 * allocated
552 */
553 for (i = 1; i < MAX_BSETS; i++)
554 b->sets[i].data = NULL;
555}
556
557#define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559#define mca_can_free(c) \
560 max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
561
562static void mca_data_free(struct btree *b)
563{
564 struct bset_tree *t = b->sets;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800565
566 BUG_ON(b->io_mutex.count != 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700567
568 if (bset_prev_bytes(b) < PAGE_SIZE)
569 kfree(t->prev);
570 else
571 free_pages((unsigned long) t->prev,
572 get_order(bset_prev_bytes(b)));
573
574 if (bset_tree_bytes(b) < PAGE_SIZE)
575 kfree(t->tree);
576 else
577 free_pages((unsigned long) t->tree,
578 get_order(bset_tree_bytes(b)));
579
580 free_pages((unsigned long) t->data, b->page_order);
581
582 t->prev = NULL;
583 t->tree = NULL;
584 t->data = NULL;
585 list_move(&b->list, &b->c->btree_cache_freed);
586 b->c->bucket_cache_used--;
587}
588
589static void mca_bucket_free(struct btree *b)
590{
591 BUG_ON(btree_node_dirty(b));
592
593 b->key.ptr[0] = 0;
594 hlist_del_init_rcu(&b->hash);
595 list_move(&b->list, &b->c->btree_cache_freeable);
596}
597
598static unsigned btree_order(struct bkey *k)
599{
600 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
601}
602
603static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
604{
605 struct bset_tree *t = b->sets;
606 BUG_ON(t->data);
607
608 b->page_order = max_t(unsigned,
609 ilog2(b->c->btree_pages),
610 btree_order(k));
611
612 t->data = (void *) __get_free_pages(gfp, b->page_order);
613 if (!t->data)
614 goto err;
615
616 t->tree = bset_tree_bytes(b) < PAGE_SIZE
617 ? kmalloc(bset_tree_bytes(b), gfp)
618 : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
619 if (!t->tree)
620 goto err;
621
622 t->prev = bset_prev_bytes(b) < PAGE_SIZE
623 ? kmalloc(bset_prev_bytes(b), gfp)
624 : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
625 if (!t->prev)
626 goto err;
627
628 list_move(&b->list, &b->c->btree_cache);
629 b->c->bucket_cache_used++;
630 return;
631err:
632 mca_data_free(b);
633}
634
635static struct btree *mca_bucket_alloc(struct cache_set *c,
636 struct bkey *k, gfp_t gfp)
637{
638 struct btree *b = kzalloc(sizeof(struct btree), gfp);
639 if (!b)
640 return NULL;
641
642 init_rwsem(&b->lock);
643 lockdep_set_novalidate_class(&b->lock);
644 INIT_LIST_HEAD(&b->list);
Kent Overstreet57943512013-04-25 13:58:35 -0700645 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700646 b->c = c;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800647 sema_init(&b->io_mutex, 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700648
649 mca_data_alloc(b, k, gfp);
650 return b;
651}
652
Kent Overstreete8e1d462013-07-24 17:27:07 -0700653static int mca_reap(struct btree *b, unsigned min_order, bool flush)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700654{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700655 struct closure cl;
656
657 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700658 lockdep_assert_held(&b->c->bucket_lock);
659
660 if (!down_write_trylock(&b->lock))
661 return -ENOMEM;
662
Kent Overstreete8e1d462013-07-24 17:27:07 -0700663 BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
664
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800665 if (b->page_order < min_order)
666 goto out_unlock;
667
668 if (!flush) {
669 if (btree_node_dirty(b))
670 goto out_unlock;
671
672 if (down_trylock(&b->io_mutex))
673 goto out_unlock;
674 up(&b->io_mutex);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700675 }
676
Kent Overstreetf269af52013-07-23 20:48:29 -0700677 if (btree_node_dirty(b))
678 bch_btree_node_write_sync(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700679
Kent Overstreete8e1d462013-07-24 17:27:07 -0700680 /* wait for any in flight btree write */
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800681 down(&b->io_mutex);
682 up(&b->io_mutex);
Kent Overstreete8e1d462013-07-24 17:27:07 -0700683
Kent Overstreetcafe5632013-03-23 16:11:31 -0700684 return 0;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800685out_unlock:
686 rw_unlock(true, b);
687 return -ENOMEM;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700688}
689
Dave Chinner7dc19d52013-08-28 10:18:11 +1000690static unsigned long bch_mca_scan(struct shrinker *shrink,
691 struct shrink_control *sc)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700692{
693 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
694 struct btree *b, *t;
695 unsigned long i, nr = sc->nr_to_scan;
Dave Chinner7dc19d52013-08-28 10:18:11 +1000696 unsigned long freed = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700697
698 if (c->shrinker_disabled)
Dave Chinner7dc19d52013-08-28 10:18:11 +1000699 return SHRINK_STOP;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700700
701 if (c->try_harder)
Dave Chinner7dc19d52013-08-28 10:18:11 +1000702 return SHRINK_STOP;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700703
704 /* Return -1 if we can't do anything right now */
Kent Overstreeta698e082013-09-23 23:17:34 -0700705 if (sc->gfp_mask & __GFP_IO)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700706 mutex_lock(&c->bucket_lock);
707 else if (!mutex_trylock(&c->bucket_lock))
708 return -1;
709
Kent Overstreet36c9ea92013-06-03 13:04:56 -0700710 /*
711 * It's _really_ critical that we don't free too many btree nodes - we
712 * have to always leave ourselves a reserve. The reserve is how we
713 * guarantee that allocating memory for a new btree node can always
714 * succeed, so that inserting keys into the btree can always succeed and
715 * IO can always make forward progress:
716 */
Kent Overstreetcafe5632013-03-23 16:11:31 -0700717 nr /= c->btree_pages;
718 nr = min_t(unsigned long, nr, mca_can_free(c));
719
720 i = 0;
721 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
Dave Chinner7dc19d52013-08-28 10:18:11 +1000722 if (freed >= nr)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700723 break;
724
725 if (++i > 3 &&
Kent Overstreete8e1d462013-07-24 17:27:07 -0700726 !mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700727 mca_data_free(b);
728 rw_unlock(true, b);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000729 freed++;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700730 }
731 }
732
Dave Chinner7dc19d52013-08-28 10:18:11 +1000733 for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
Kent Overstreetb0f32a52013-12-10 13:24:26 -0800734 if (list_empty(&c->btree_cache))
735 goto out;
736
Kent Overstreetcafe5632013-03-23 16:11:31 -0700737 b = list_first_entry(&c->btree_cache, struct btree, list);
738 list_rotate_left(&c->btree_cache);
739
740 if (!b->accessed &&
Kent Overstreete8e1d462013-07-24 17:27:07 -0700741 !mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700742 mca_bucket_free(b);
743 mca_data_free(b);
744 rw_unlock(true, b);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000745 freed++;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700746 } else
747 b->accessed = 0;
748 }
749out:
Kent Overstreetcafe5632013-03-23 16:11:31 -0700750 mutex_unlock(&c->bucket_lock);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000751 return freed;
752}
753
754static unsigned long bch_mca_count(struct shrinker *shrink,
755 struct shrink_control *sc)
756{
757 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
758
759 if (c->shrinker_disabled)
760 return 0;
761
762 if (c->try_harder)
763 return 0;
764
765 return mca_can_free(c) * c->btree_pages;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700766}
767
768void bch_btree_cache_free(struct cache_set *c)
769{
770 struct btree *b;
771 struct closure cl;
772 closure_init_stack(&cl);
773
774 if (c->shrink.list.next)
775 unregister_shrinker(&c->shrink);
776
777 mutex_lock(&c->bucket_lock);
778
779#ifdef CONFIG_BCACHE_DEBUG
780 if (c->verify_data)
781 list_move(&c->verify_data->list, &c->btree_cache);
782#endif
783
784 list_splice(&c->btree_cache_freeable,
785 &c->btree_cache);
786
787 while (!list_empty(&c->btree_cache)) {
788 b = list_first_entry(&c->btree_cache, struct btree, list);
789
790 if (btree_node_dirty(b))
791 btree_complete_write(b, btree_current_write(b));
792 clear_bit(BTREE_NODE_dirty, &b->flags);
793
794 mca_data_free(b);
795 }
796
797 while (!list_empty(&c->btree_cache_freed)) {
798 b = list_first_entry(&c->btree_cache_freed,
799 struct btree, list);
800 list_del(&b->list);
801 cancel_delayed_work_sync(&b->work);
802 kfree(b);
803 }
804
805 mutex_unlock(&c->bucket_lock);
806}
807
808int bch_btree_cache_alloc(struct cache_set *c)
809{
810 unsigned i;
811
Kent Overstreetcafe5632013-03-23 16:11:31 -0700812 for (i = 0; i < mca_reserve(c); i++)
Kent Overstreet72a44512013-10-24 17:19:26 -0700813 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
814 return -ENOMEM;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700815
816 list_splice_init(&c->btree_cache,
817 &c->btree_cache_freeable);
818
819#ifdef CONFIG_BCACHE_DEBUG
820 mutex_init(&c->verify_lock);
821
822 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
823
824 if (c->verify_data &&
825 c->verify_data->sets[0].data)
826 list_del_init(&c->verify_data->list);
827 else
828 c->verify_data = NULL;
829#endif
830
Dave Chinner7dc19d52013-08-28 10:18:11 +1000831 c->shrink.count_objects = bch_mca_count;
832 c->shrink.scan_objects = bch_mca_scan;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700833 c->shrink.seeks = 4;
834 c->shrink.batch = c->btree_pages * 2;
835 register_shrinker(&c->shrink);
836
837 return 0;
838}
839
840/* Btree in memory cache - hash table */
841
842static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
843{
844 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
845}
846
847static struct btree *mca_find(struct cache_set *c, struct bkey *k)
848{
849 struct btree *b;
850
851 rcu_read_lock();
852 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
853 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
854 goto out;
855 b = NULL;
856out:
857 rcu_read_unlock();
858 return b;
859}
860
Kent Overstreete8e1d462013-07-24 17:27:07 -0700861static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700862{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700863 struct btree *b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700864
Kent Overstreetc37511b2013-04-26 15:39:55 -0700865 trace_bcache_btree_cache_cannibalize(c);
866
Kent Overstreete8e1d462013-07-24 17:27:07 -0700867 if (!c->try_harder) {
868 c->try_harder = current;
869 c->try_harder_start = local_clock();
870 } else if (c->try_harder != current)
871 return ERR_PTR(-ENOSPC);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700872
Kent Overstreete8e1d462013-07-24 17:27:07 -0700873 list_for_each_entry_reverse(b, &c->btree_cache, list)
874 if (!mca_reap(b, btree_order(k), false))
875 return b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700876
Kent Overstreete8e1d462013-07-24 17:27:07 -0700877 list_for_each_entry_reverse(b, &c->btree_cache, list)
878 if (!mca_reap(b, btree_order(k), true))
879 return b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700880
Kent Overstreete8e1d462013-07-24 17:27:07 -0700881 return ERR_PTR(-ENOMEM);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700882}
883
884/*
885 * We can only have one thread cannibalizing other cached btree nodes at a time,
886 * or we'll deadlock. We use an open coded mutex to ensure that, which a
887 * cannibalize_bucket() will take. This means every time we unlock the root of
888 * the btree, we need to release this lock if we have it held.
889 */
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700890static void bch_cannibalize_unlock(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700891{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700892 if (c->try_harder == current) {
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600893 bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700894 c->try_harder = NULL;
Kent Overstreete8e1d462013-07-24 17:27:07 -0700895 wake_up(&c->try_wait);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700896 }
897}
898
Kent Overstreete8e1d462013-07-24 17:27:07 -0700899static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700900{
901 struct btree *b;
902
Kent Overstreete8e1d462013-07-24 17:27:07 -0700903 BUG_ON(current->bio_list);
904
Kent Overstreetcafe5632013-03-23 16:11:31 -0700905 lockdep_assert_held(&c->bucket_lock);
906
907 if (mca_find(c, k))
908 return NULL;
909
910 /* btree_free() doesn't free memory; it sticks the node on the end of
911 * the list. Check if there's any freed nodes there:
912 */
913 list_for_each_entry(b, &c->btree_cache_freeable, list)
Kent Overstreete8e1d462013-07-24 17:27:07 -0700914 if (!mca_reap(b, btree_order(k), false))
Kent Overstreetcafe5632013-03-23 16:11:31 -0700915 goto out;
916
917 /* We never free struct btree itself, just the memory that holds the on
918 * disk node. Check the freed list before allocating a new one:
919 */
920 list_for_each_entry(b, &c->btree_cache_freed, list)
Kent Overstreete8e1d462013-07-24 17:27:07 -0700921 if (!mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700922 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b->sets[0].data)
924 goto err;
925 else
926 goto out;
927 }
928
929 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
930 if (!b)
931 goto err;
932
933 BUG_ON(!down_write_trylock(&b->lock));
934 if (!b->sets->data)
935 goto err;
936out:
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800937 BUG_ON(b->io_mutex.count != 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700938
939 bkey_copy(&b->key, k);
940 list_move(&b->list, &c->btree_cache);
941 hlist_del_init_rcu(&b->hash);
942 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
943
944 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
945 b->level = level;
Kent Overstreetd6fd3b12013-07-24 17:20:19 -0700946 b->parent = (void *) ~0UL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700947
948 mca_reinit(b);
949
950 return b;
951err:
952 if (b)
953 rw_unlock(true, b);
954
Kent Overstreete8e1d462013-07-24 17:27:07 -0700955 b = mca_cannibalize(c, k);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700956 if (!IS_ERR(b))
957 goto out;
958
959 return b;
960}
961
962/**
963 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
964 * in from disk if necessary.
965 *
Kent Overstreetb54d6932013-07-24 18:04:18 -0700966 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
Kent Overstreetcafe5632013-03-23 16:11:31 -0700967 *
968 * The btree node will have either a read or a write lock held, depending on
969 * level and op->lock.
970 */
971struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
Kent Overstreete8e1d462013-07-24 17:27:07 -0700972 int level, bool write)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700973{
974 int i = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700975 struct btree *b;
976
977 BUG_ON(level < 0);
978retry:
979 b = mca_find(c, k);
980
981 if (!b) {
Kent Overstreet57943512013-04-25 13:58:35 -0700982 if (current->bio_list)
983 return ERR_PTR(-EAGAIN);
984
Kent Overstreetcafe5632013-03-23 16:11:31 -0700985 mutex_lock(&c->bucket_lock);
Kent Overstreete8e1d462013-07-24 17:27:07 -0700986 b = mca_alloc(c, k, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700987 mutex_unlock(&c->bucket_lock);
988
989 if (!b)
990 goto retry;
991 if (IS_ERR(b))
992 return b;
993
Kent Overstreet57943512013-04-25 13:58:35 -0700994 bch_btree_node_read(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700995
996 if (!write)
997 downgrade_write(&b->lock);
998 } else {
999 rw_lock(write, b, level);
1000 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1001 rw_unlock(write, b);
1002 goto retry;
1003 }
1004 BUG_ON(b->level != level);
1005 }
1006
1007 b->accessed = 1;
1008
1009 for (; i <= b->nsets && b->sets[i].size; i++) {
1010 prefetch(b->sets[i].tree);
1011 prefetch(b->sets[i].data);
1012 }
1013
1014 for (; i <= b->nsets; i++)
1015 prefetch(b->sets[i].data);
1016
Kent Overstreet57943512013-04-25 13:58:35 -07001017 if (btree_node_io_error(b)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001018 rw_unlock(write, b);
Kent Overstreet57943512013-04-25 13:58:35 -07001019 return ERR_PTR(-EIO);
1020 }
1021
1022 BUG_ON(!b->written);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001023
1024 return b;
1025}
1026
1027static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
1028{
1029 struct btree *b;
1030
1031 mutex_lock(&c->bucket_lock);
Kent Overstreete8e1d462013-07-24 17:27:07 -07001032 b = mca_alloc(c, k, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001033 mutex_unlock(&c->bucket_lock);
1034
1035 if (!IS_ERR_OR_NULL(b)) {
Kent Overstreet57943512013-04-25 13:58:35 -07001036 bch_btree_node_read(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001037 rw_unlock(true, b);
1038 }
1039}
1040
1041/* Btree alloc */
1042
Kent Overstreete8e1d462013-07-24 17:27:07 -07001043static void btree_node_free(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001044{
1045 unsigned i;
1046
Kent Overstreetc37511b2013-04-26 15:39:55 -07001047 trace_bcache_btree_node_free(b);
1048
Kent Overstreetcafe5632013-03-23 16:11:31 -07001049 BUG_ON(b == b->c->root);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001050
1051 if (btree_node_dirty(b))
1052 btree_complete_write(b, btree_current_write(b));
1053 clear_bit(BTREE_NODE_dirty, &b->flags);
1054
Kent Overstreetcafe5632013-03-23 16:11:31 -07001055 cancel_delayed_work(&b->work);
1056
1057 mutex_lock(&b->c->bucket_lock);
1058
1059 for (i = 0; i < KEY_PTRS(&b->key); i++) {
1060 BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
1061
1062 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1063 PTR_BUCKET(b->c, &b->key, i));
1064 }
1065
1066 bch_bucket_free(b->c, &b->key);
1067 mca_bucket_free(b);
1068 mutex_unlock(&b->c->bucket_lock);
1069}
1070
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001071struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001072{
1073 BKEY_PADDED(key) k;
1074 struct btree *b = ERR_PTR(-EAGAIN);
1075
1076 mutex_lock(&c->bucket_lock);
1077retry:
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001078 if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001079 goto err;
1080
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07001081 bkey_put(c, &k.key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001082 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1083
Kent Overstreete8e1d462013-07-24 17:27:07 -07001084 b = mca_alloc(c, &k.key, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001085 if (IS_ERR(b))
1086 goto err_free;
1087
1088 if (!b) {
Kent Overstreetb1a67b02013-03-25 11:46:44 -07001089 cache_bug(c,
1090 "Tried to allocate bucket that was in btree cache");
Kent Overstreetcafe5632013-03-23 16:11:31 -07001091 goto retry;
1092 }
1093
Kent Overstreetcafe5632013-03-23 16:11:31 -07001094 b->accessed = 1;
1095 bch_bset_init_next(b);
1096
1097 mutex_unlock(&c->bucket_lock);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001098
1099 trace_bcache_btree_node_alloc(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001100 return b;
1101err_free:
1102 bch_bucket_free(c, &k.key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001103err:
1104 mutex_unlock(&c->bucket_lock);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001105
1106 trace_bcache_btree_node_alloc_fail(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001107 return b;
1108}
1109
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001110static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001111{
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001112 struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001113 if (!IS_ERR_OR_NULL(n))
1114 bch_btree_sort_into(b, n);
1115
1116 return n;
1117}
1118
Kent Overstreet8835c122013-07-24 23:18:05 -07001119static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1120{
1121 unsigned i;
1122
1123 bkey_copy(k, &b->key);
1124 bkey_copy_key(k, &ZERO_KEY);
1125
1126 for (i = 0; i < KEY_PTRS(k); i++) {
1127 uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
1128
1129 SET_PTR_GEN(k, i, g);
1130 }
1131
1132 atomic_inc(&b->c->prio_blocked);
1133}
1134
Kent Overstreetcafe5632013-03-23 16:11:31 -07001135/* Garbage collection */
1136
1137uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
1138{
1139 uint8_t stale = 0;
1140 unsigned i;
1141 struct bucket *g;
1142
1143 /*
1144 * ptr_invalid() can't return true for the keys that mark btree nodes as
1145 * freed, but since ptr_bad() returns true we'll never actually use them
1146 * for anything and thus we don't want mark their pointers here
1147 */
1148 if (!bkey_cmp(k, &ZERO_KEY))
1149 return stale;
1150
1151 for (i = 0; i < KEY_PTRS(k); i++) {
1152 if (!ptr_available(c, k, i))
1153 continue;
1154
1155 g = PTR_BUCKET(c, k, i);
1156
1157 if (gen_after(g->gc_gen, PTR_GEN(k, i)))
1158 g->gc_gen = PTR_GEN(k, i);
1159
1160 if (ptr_stale(c, k, i)) {
1161 stale = max(stale, ptr_stale(c, k, i));
1162 continue;
1163 }
1164
1165 cache_bug_on(GC_MARK(g) &&
1166 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1167 c, "inconsistent ptrs: mark = %llu, level = %i",
1168 GC_MARK(g), level);
1169
1170 if (level)
1171 SET_GC_MARK(g, GC_MARK_METADATA);
1172 else if (KEY_DIRTY(k))
1173 SET_GC_MARK(g, GC_MARK_DIRTY);
1174
1175 /* guard against overflow */
1176 SET_GC_SECTORS_USED(g, min_t(unsigned,
1177 GC_SECTORS_USED(g) + KEY_SIZE(k),
1178 (1 << 14) - 1));
1179
1180 BUG_ON(!GC_SECTORS_USED(g));
1181 }
1182
1183 return stale;
1184}
1185
1186#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1187
Kent Overstreeta1f03582013-09-10 19:07:00 -07001188static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001189{
1190 uint8_t stale = 0;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001191 unsigned keys = 0, good_keys = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001192 struct bkey *k;
1193 struct btree_iter iter;
1194 struct bset_tree *t;
1195
1196 gc->nodes++;
1197
1198 for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001199 stale = max(stale, btree_mark_key(b, k));
Kent Overstreeta1f03582013-09-10 19:07:00 -07001200 keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001201
1202 if (bch_ptr_bad(b, k))
1203 continue;
1204
Kent Overstreetcafe5632013-03-23 16:11:31 -07001205 gc->key_bytes += bkey_u64s(k);
1206 gc->nkeys++;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001207 good_keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001208
1209 gc->data += KEY_SIZE(k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001210 }
1211
1212 for (t = b->sets; t <= &b->sets[b->nsets]; t++)
1213 btree_bug_on(t->size &&
1214 bset_written(b, t) &&
1215 bkey_cmp(&b->key, &t->end) < 0,
1216 b, "found short btree key in gc");
1217
Kent Overstreeta1f03582013-09-10 19:07:00 -07001218 if (b->c->gc_always_rewrite)
1219 return true;
1220
1221 if (stale > 10)
1222 return true;
1223
1224 if ((keys - good_keys) * 2 > keys)
1225 return true;
1226
1227 return false;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001228}
1229
Kent Overstreeta1f03582013-09-10 19:07:00 -07001230#define GC_MERGE_NODES 4U
Kent Overstreetcafe5632013-03-23 16:11:31 -07001231
1232struct gc_merge_info {
1233 struct btree *b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001234 unsigned keys;
1235};
1236
Kent Overstreeta1f03582013-09-10 19:07:00 -07001237static int bch_btree_insert_node(struct btree *, struct btree_op *,
1238 struct keylist *, atomic_t *, struct bkey *);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001239
Kent Overstreeta1f03582013-09-10 19:07:00 -07001240static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1241 struct keylist *keylist, struct gc_stat *gc,
1242 struct gc_merge_info *r)
1243{
1244 unsigned i, nodes = 0, keys = 0, blocks;
1245 struct btree *new_nodes[GC_MERGE_NODES];
1246 struct closure cl;
1247 struct bkey *k;
1248
1249 memset(new_nodes, 0, sizeof(new_nodes));
Kent Overstreetb54d6932013-07-24 18:04:18 -07001250 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001251
Kent Overstreeta1f03582013-09-10 19:07:00 -07001252 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001253 keys += r[nodes++].keys;
1254
1255 blocks = btree_default_blocks(b->c) * 2 / 3;
1256
1257 if (nodes < 2 ||
1258 __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001259 return 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001260
Kent Overstreeta1f03582013-09-10 19:07:00 -07001261 for (i = 0; i < nodes; i++) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001262 new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001263 if (IS_ERR_OR_NULL(new_nodes[i]))
1264 goto out_nocoalesce;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001265 }
1266
1267 for (i = nodes - 1; i > 0; --i) {
Kent Overstreeta1f03582013-09-10 19:07:00 -07001268 struct bset *n1 = new_nodes[i]->sets->data;
1269 struct bset *n2 = new_nodes[i - 1]->sets->data;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001270 struct bkey *k, *last = NULL;
1271
1272 keys = 0;
1273
Kent Overstreeta1f03582013-09-10 19:07:00 -07001274 if (i > 1) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001275 for (k = n2->start;
1276 k < end(n2);
1277 k = bkey_next(k)) {
1278 if (__set_blocks(n1, n1->keys + keys +
1279 bkey_u64s(k), b->c) > blocks)
1280 break;
1281
1282 last = k;
1283 keys += bkey_u64s(k);
1284 }
Kent Overstreeta1f03582013-09-10 19:07:00 -07001285 } else {
1286 /*
1287 * Last node we're not getting rid of - we're getting
1288 * rid of the node at r[0]. Have to try and fit all of
1289 * the remaining keys into this node; we can't ensure
1290 * they will always fit due to rounding and variable
1291 * length keys (shouldn't be possible in practice,
1292 * though)
1293 */
1294 if (__set_blocks(n1, n1->keys + n2->keys,
1295 b->c) > btree_blocks(new_nodes[i]))
1296 goto out_nocoalesce;
1297
1298 keys = n2->keys;
1299 /* Take the key of the node we're getting rid of */
1300 last = &r->b->key;
1301 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001302
1303 BUG_ON(__set_blocks(n1, n1->keys + keys,
Kent Overstreeta1f03582013-09-10 19:07:00 -07001304 b->c) > btree_blocks(new_nodes[i]));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001305
Kent Overstreeta1f03582013-09-10 19:07:00 -07001306 if (last)
1307 bkey_copy_key(&new_nodes[i]->key, last);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001308
1309 memcpy(end(n1),
1310 n2->start,
1311 (void *) node(n2, keys) - (void *) n2->start);
1312
1313 n1->keys += keys;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001314 r[i].keys = n1->keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001315
1316 memmove(n2->start,
1317 node(n2, keys),
1318 (void *) end(n2) - (void *) node(n2, keys));
1319
1320 n2->keys -= keys;
1321
Kent Overstreeta1f03582013-09-10 19:07:00 -07001322 if (bch_keylist_realloc(keylist,
1323 KEY_PTRS(&new_nodes[i]->key), b->c))
1324 goto out_nocoalesce;
1325
1326 bch_btree_node_write(new_nodes[i], &cl);
1327 bch_keylist_add(keylist, &new_nodes[i]->key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001328 }
1329
Kent Overstreeta1f03582013-09-10 19:07:00 -07001330 for (i = 0; i < nodes; i++) {
1331 if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
1332 goto out_nocoalesce;
1333
1334 make_btree_freeing_key(r[i].b, keylist->top);
1335 bch_keylist_push(keylist);
1336 }
1337
1338 /* We emptied out this node */
1339 BUG_ON(new_nodes[0]->sets->data->keys);
1340 btree_node_free(new_nodes[0]);
1341 rw_unlock(true, new_nodes[0]);
1342
1343 closure_sync(&cl);
1344
1345 for (i = 0; i < nodes; i++) {
1346 btree_node_free(r[i].b);
1347 rw_unlock(true, r[i].b);
1348
1349 r[i].b = new_nodes[i];
1350 }
1351
1352 bch_btree_insert_node(b, op, keylist, NULL, NULL);
1353 BUG_ON(!bch_keylist_empty(keylist));
1354
1355 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1356 r[nodes - 1].b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001357
Kent Overstreetc37511b2013-04-26 15:39:55 -07001358 trace_bcache_btree_gc_coalesce(nodes);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001359 gc->nodes--;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001360
Kent Overstreeta1f03582013-09-10 19:07:00 -07001361 /* Invalidated our iterator */
1362 return -EINTR;
1363
1364out_nocoalesce:
1365 closure_sync(&cl);
1366
1367 while ((k = bch_keylist_pop(keylist)))
1368 if (!bkey_cmp(k, &ZERO_KEY))
1369 atomic_dec(&b->c->prio_blocked);
1370
1371 for (i = 0; i < nodes; i++)
1372 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1373 btree_node_free(new_nodes[i]);
1374 rw_unlock(true, new_nodes[i]);
1375 }
1376 return 0;
1377}
1378
1379static unsigned btree_gc_count_keys(struct btree *b)
1380{
1381 struct bkey *k;
1382 struct btree_iter iter;
1383 unsigned ret = 0;
1384
1385 for_each_key_filter(b, k, &iter, bch_ptr_bad)
1386 ret += bkey_u64s(k);
1387
1388 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001389}
1390
1391static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1392 struct closure *writes, struct gc_stat *gc)
1393{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001394 unsigned i;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001395 int ret = 0;
1396 bool should_rewrite;
1397 struct btree *n;
1398 struct bkey *k;
1399 struct keylist keys;
1400 struct btree_iter iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001401 struct gc_merge_info r[GC_MERGE_NODES];
Kent Overstreeta1f03582013-09-10 19:07:00 -07001402 struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001403
Kent Overstreeta1f03582013-09-10 19:07:00 -07001404 bch_keylist_init(&keys);
1405 bch_btree_iter_init(b, &iter, &b->c->gc_done);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001406
Kent Overstreeta1f03582013-09-10 19:07:00 -07001407 for (i = 0; i < GC_MERGE_NODES; i++)
1408 r[i].b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001409
Kent Overstreeta1f03582013-09-10 19:07:00 -07001410 while (1) {
1411 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
1412 if (k) {
1413 r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
1414 if (IS_ERR(r->b)) {
1415 ret = PTR_ERR(r->b);
1416 break;
1417 }
1418
1419 r->keys = btree_gc_count_keys(r->b);
1420
1421 ret = btree_gc_coalesce(b, op, &keys, gc, r);
1422 if (ret)
1423 break;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001424 }
1425
Kent Overstreeta1f03582013-09-10 19:07:00 -07001426 if (!last->b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001427 break;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001428
1429 if (!IS_ERR(last->b)) {
1430 should_rewrite = btree_gc_mark_node(last->b, gc);
1431 if (should_rewrite) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001432 n = btree_node_alloc_replacement(last->b,
1433 false);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001434
1435 if (!IS_ERR_OR_NULL(n)) {
1436 bch_btree_node_write_sync(n);
1437 bch_keylist_add(&keys, &n->key);
1438
1439 make_btree_freeing_key(last->b,
1440 keys.top);
1441 bch_keylist_push(&keys);
1442
1443 btree_node_free(last->b);
1444
1445 bch_btree_insert_node(b, op, &keys,
1446 NULL, NULL);
1447 BUG_ON(!bch_keylist_empty(&keys));
1448
1449 rw_unlock(true, last->b);
1450 last->b = n;
1451
1452 /* Invalidated our iterator */
1453 ret = -EINTR;
1454 break;
1455 }
1456 }
1457
1458 if (last->b->level) {
1459 ret = btree_gc_recurse(last->b, op, writes, gc);
1460 if (ret)
1461 break;
1462 }
1463
1464 bkey_copy_key(&b->c->gc_done, &last->b->key);
1465
1466 /*
1467 * Must flush leaf nodes before gc ends, since replace
1468 * operations aren't journalled
1469 */
1470 if (btree_node_dirty(last->b))
1471 bch_btree_node_write(last->b, writes);
1472 rw_unlock(true, last->b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001473 }
1474
Kent Overstreeta1f03582013-09-10 19:07:00 -07001475 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1476 r->b = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001477
Kent Overstreetcafe5632013-03-23 16:11:31 -07001478 if (need_resched()) {
1479 ret = -EAGAIN;
1480 break;
1481 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001482 }
1483
Kent Overstreeta1f03582013-09-10 19:07:00 -07001484 for (i = 0; i < GC_MERGE_NODES; i++)
1485 if (!IS_ERR_OR_NULL(r[i].b)) {
1486 if (btree_node_dirty(r[i].b))
1487 bch_btree_node_write(r[i].b, writes);
1488 rw_unlock(true, r[i].b);
1489 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001490
Kent Overstreeta1f03582013-09-10 19:07:00 -07001491 bch_keylist_free(&keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001492
1493 return ret;
1494}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, false);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);
			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc = 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc = b->gc_gen;
			c->need_gc = max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}
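
/*
 * Editorial note (summary of the code above, not from the original source):
 * "available" counts buckets that are neither pinned nor marked as metadata
 * or dirty-writeback; buckets whose GC_SECTORS_USED dropped to zero are also
 * handed straight to the unused list via bch_bucket_add_unused(), so the
 * allocator can reuse them without a further invalidate pass.
 */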

static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);

		if (ret && ret != -EAGAIN)
			pr_warn("gc failed!");
	} while (ret);

	available = bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data <<= 9;
	stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}
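
/*
 * Editorial note on the stat conversions above (not from the original
 * source): key_bytes is accumulated in u64s and scaled to bytes, data is
 * accumulated in sectors and scaled to bytes with << 9, and in_use is an
 * integer percentage.  For example, with c->nbuckets == 1000 and
 * available == 250, in_use = (1000 - 250) * 100 / 1000 = 75.
 */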

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;
	struct cache *ca;
	unsigned i;

	while (1) {
again:
		bch_btree_gc(c);

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		mutex_lock(&c->bucket_lock);

		for_each_cache(ca, c, i)
			if (ca->invalidate_needs_gc) {
				mutex_unlock(&c->bucket_lock);
				set_current_state(TASK_RUNNING);
				goto again;
			}

		mutex_unlock(&c->bucket_lock);

		try_to_freeze();
		schedule();
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
	return 0;
}
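
/*
 * Editorial sketch (assumed usage, not taken from this excerpt): the cache
 * set start-up path would typically create the thread once and later kick it
 * when allocation pressure requires a gc pass, roughly:
 *
 *	if (bch_gc_thread_start(c))
 *		goto err;
 *	...
 *	wake_up_process(c->gc_thread);	// e.g. from the allocator
 */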

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
				   unsigned long **seen)
{
	int ret = 0;
	unsigned i;
	struct bkey *k, *p = NULL;
	struct bucket *g;
	struct btree_iter iter;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		for (i = 0; i < KEY_PTRS(k); i++) {
			if (!ptr_available(b->c, k, i))
				continue;

			g = PTR_BUCKET(b->c, k, i);

			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
						seen[PTR_DEV(k, i)]) ||
			    !ptr_stale(b->c, k, i)) {
				g->gen = PTR_GEN(k, i);

				if (b->level)
					g->prio = BTREE_PRIO;
				else if (g->prio == BTREE_PRIO)
					g->prio = INITIAL_PRIO;
			}
		}

		btree_mark_key(b, k);
	}

	if (b->level) {
		bch_btree_iter_init(b, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
			if (k)
				btree_node_prefetch(b->c, k, b->level - 1);

			if (p)
				ret = btree(check_recurse, p, b, op, seen);

			p = k;
		} while (p && !ret);
	}

	return 0;
}

int bch_btree_check(struct cache_set *c)
{
	int ret = -ENOMEM;
	unsigned i;
	unsigned long *seen[MAX_CACHES_PER_SET];
	struct btree_op op;

	memset(seen, 0, sizeof(seen));
	bch_btree_op_init(&op, SHRT_MAX);

	for (i = 0; c->cache[i]; i++) {
		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
		seen[i] = kmalloc(n, GFP_KERNEL);
		if (!seen[i])
			goto err;

		/* Disables the seen array until prio_read() uses it too */
		memset(seen[i], 0xFF, n);
	}

	ret = btree_root(check_recurse, c, &op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
		kfree(seen[i]);
	return ret;
}
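
/*
 * Editorial note (summary of the code above, not from the original source):
 * seen[] is one bitmap per cache, sized DIV_ROUND_UP(nbuckets, 8) bytes, i.e.
 * one bit per bucket, and is indexed with the bucket number.  Pre-filling it
 * with 0xFF makes __test_and_set_bit() always report "already seen", which is
 * what the in-code comment means by disabling the array until prio_read()
 * shares it.
 */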

/* Btree insertion */

static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
{
	struct bset *i = b->sets[b->nsets].data;

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) end(i) - (void *) where);

	i->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, where);
}
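
/*
 * Editorial illustration (not in the original source): shift_keys() opens a
 * gap of bkey_u64s(insert) u64s at 'where' by sliding everything up to end(i)
 * to the right, then copies the new key into the gap:
 *
 *	before:	... | where | rest of bset | end(i)
 *	after:	... | insert | where | rest of bset | end(i)'
 *
 * i->keys grows by the same number of u64s, so end(i) stays consistent with
 * the shifted data.
 */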

static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     offset, -sectors);
	}

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k ||
		    bkey_cmp(&START_KEY(k), insert) >= 0)
			break;

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, &b->sets[b->nsets],
						      insert);
				shift_keys(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				shift_keys(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			return false;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

	return false;
}
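
/*
 * Editorial summary of the overlap cases above (not from the original
 * source): an insert that lands strictly inside an existing key splits it
 * into a bottom half (cut_back) and a top half (cut_front); an insert that
 * ends inside an existing key trims its front; otherwise the existing key's
 * back is trimmed, or it is dropped entirely when fully overwritten.  For
 * replace operations, returning true means the expected key was not (fully)
 * found, which the caller records as an insert collision; if only part was
 * found, insert is shrunk to the sectors_found that did match.
 */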

static bool btree_insert_key(struct btree *b, struct btree_op *op,
			     struct bkey *k, struct bkey *replace_key)
{
	struct bset *i = b->sets[b->nsets].data;
	struct bkey *m, *prev;
	unsigned status = BTREE_INSERT_STATUS_INSERT;

	BUG_ON(bkey_cmp(k, &b->key) > 0);
	BUG_ON(b->level && !KEY_PTRS(k));
	BUG_ON(!b->level && !KEY_OFFSET(k));

	if (!b->level) {
		struct btree_iter iter;

		/*
		 * bset_search() returns the first key that is strictly greater
		 * than the search key - but for back merging, we want to find
		 * the previous key.
		 */
		prev = NULL;
		m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));

		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
			op->insert_collision = true;
			return false;
		}

		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     KEY_START(k), KEY_SIZE(k));

		while (m != end(i) &&
		       bkey_cmp(k, &START_KEY(m)) > 0)
			prev = m, m = bkey_next(m);

		if (key_merging_disabled(b->c))
			goto insert;

		/* prev is in the tree, if we merge we're done */
		status = BTREE_INSERT_STATUS_BACK_MERGE;
		if (prev &&
		    bch_bkey_try_merge(b, prev, k))
			goto merged;

		status = BTREE_INSERT_STATUS_OVERWROTE;
		if (m != end(i) &&
		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
			goto copy;

		status = BTREE_INSERT_STATUS_FRONT_MERGE;
		if (m != end(i) &&
		    bch_bkey_try_merge(b, k, m))
			goto copy;
	} else {
		BUG_ON(replace_key);
		m = bch_bset_search(b, &b->sets[b->nsets], k);
	}

insert:	shift_keys(b, m, k);
copy:	bkey_copy(m, k);
merged:
	bch_check_keys(b, "%u for %s", status,
		       replace_key ? "replace" : "insert");

	if (b->level && !KEY_OFFSET(k))
		btree_current_write(b)->prio_blocked++;

	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);

	return true;
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(b);

	while (!bch_keylist_empty(insert_keys)) {
		struct bset *i = write_block(b);
		struct bkey *k = insert_keys->keys;

		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
		    > btree_blocks(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, op, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, op, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(b) < oldsize);
	return ret;
}
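
/*
 * Editorial note (summary, not from the original source): a key that crosses
 * this node's boundary (b->key) is split on the stack: the BKEY_PADDED temp
 * copy is cut back to the boundary and inserted here, while the original key
 * is cut at the front and left on the keylist for the next node.  The loop
 * also stops early once the unwritten bset would no longer fit in the node,
 * which is what eventually drives a split.
 */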

static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	n1 = btree_node_alloc_replacement(b, true);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;

		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);

		n2 = bch_btree_node_alloc(b->c, b->level, true);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
			if (IS_ERR(n3))
				goto err_free2;
		}

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (n1->sets[0].data->keys * 3) / 5)
			keys += bkey_u64s(node(n1->sets[0].data, keys));

		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
		keys += bkey_u64s(node(n1->sets[0].data, keys));

		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
		n1->sets[0].data->keys = keys;

		memcpy(n2->sets[0].data->start,
		       end(n1->sets[0].data),
		       n2->sets[0].data->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);

	if (n3) {
		/* Depth increases, make a new root */
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);

		btree_node_free(b);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);

		btree_node_free(b);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		btree_node_free(b);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	pr_warn("couldn't split");
	return -ENOMEM;
}
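
/*
 * Editorial note on the thresholds above (not from the original source): a
 * real split only happens when the compacted replacement is still larger
 * than 4/5 of the node, using integer arithmetic - for example, with
 * btree_blocks(b) == 128 anything above 102 blocks splits.  The first node
 * then keeps roughly 3/5 of the key u64s and the remainder is copied into
 * n2; when the root itself splits, n3 is allocated one level up and becomes
 * the new root.
 */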

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	BUG_ON(b->level && replace_key);

	if (should_split(b)) {
		if (current->bio_list) {
			op->lock = b->c->root->level + 1;
			return -EAGAIN;
		} else if (op->lock <= b->c->root->level) {
			op->lock = b->c->root->level + 1;
			return -EINTR;
		} else {
			/* Invalidated all iterators */
			return btree_split(b, op, insert_keys, replace_key) ?:
				-EINTR;
		}
	} else {
		BUG_ON(write_block(b) != b->sets[b->nsets].data);

		if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
			if (!b->level)
				bch_btree_leaf_dirty(b, journal_ref);
			else
				bch_btree_node_write_sync(b);
		}

		return 0;
	}
}

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1)
			goto out;
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}
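
/*
 * Editorial sketch (assumed caller, not from this excerpt): inserting one
 * fully built key would look roughly like:
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bkey_copy(keys.top, &k);
 *	bch_keylist_push(&keys);
 *
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 *
 * When a replace_key is passed, -ESRCH reports that the key to be replaced
 * was no longer (fully) present.  Must not be called while holding queued
 * bios, per the BUG_ON(current->bio_list) above.
 */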

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(b, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, b,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}
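
/*
 * Editorial sketch (hypothetical callback, not part of the original source):
 * a btree_map_keys_fn sees each good key in order and steers the walk with
 * its return value, roughly:
 *
 *	static int count_dirty_fn(struct btree_op *op, struct btree *b,
 *				  struct bkey *k)
 *	{
 *		if (KEY_DIRTY(k))
 *			;	// examine the key
 *		return MAP_CONTINUE;	// or MAP_DONE to stop the walk
 *	}
 *
 * refill_keybuf_fn() below is the real in-tree example of this pattern.
 */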

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
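
/*
 * Editorial sketch (assumed usage, not from this excerpt): consumers such as
 * writeback drive the keybuf roughly like this:
 *
 *	bch_keybuf_init(&buf);
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY, pred))) {
 *		;	// issue io for w->key
 *		bch_keybuf_del(&buf, w);	// once the io completes
 *	}
 *
 * where pred is a keybuf_pred_fn deciding which keys are interesting.
 */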

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}