/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted;
 * rather, as keys are inserted we only sort the pages that have not yet been
 * written. When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 * Check for bad keys in replay
 * Propagate barriers
 * Refcount journal entries in journal_replay
 *
 * Garbage collection:
 * Finish incremental gc
 * Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Make sure all allocations get charged to the root cgroup
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Also lookup by cgroup in get_open_bucket()
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

static const char * const op_types[] = {
	"insert", "replace"
};

static const char *op_type(struct btree_op *op)
{
	return op_types[op->type];
}

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

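/*
 * PTR_HASH() identifies a btree node by bucket number and pointer
 * generation, so a node read from a reused bucket doesn't hash the same
 * as the stale node that previously occupied it.
 */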
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;

void bch_btree_op_init_stack(struct btree_op *op)
{
	memset(op, 0, sizeof(struct btree_op));
	closure_init_stack(&op->cl);
	op->lock = -1;
	bch_keylist_init(&op->keys);
}

/* Btree key manipulation */

void __bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

static void bkey_put(struct cache_set *c, struct bkey *k, int level)
{
	if ((level && KEY_OFFSET(k)) || !level)
		__bkey_put(c, k);
}

/* Btree IO */

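/*
 * The btree node checksum is seeded with the node's first pointer, tying
 * it to the bucket the node lives in, and covers everything in the bset
 * after the 64 bit csum field itself - hence the "+ 8" below.
 */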
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = end(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

static void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = b->sets[0].data;
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

	if (!i->seq)
		goto err;

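	/*
	 * Iterate over the bsets in this node one block at a time; every
	 * bset written to this node shares the seq of the first one, so a
	 * mismatched seq marks the end of valid data.
	 */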
	for (;
	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(b->c))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->sets[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, end(i));

		b->written += set_blocks(i, b->c);
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     index(i, b) < btree_blocks(b);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->sets[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(b, iter);

	i = b->sets[0].data;
	err = "short btree key";
	if (b->sets[0].size &&
	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    index(i, b), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw	= REQ_META|READ_SYNC;
	bio->bi_size	= KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;

	bch_bio_map(bio, b->sets[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);

	spin_lock(&b->c->btree_read_time_lock);
	bch_time_stats_update(&b->c->btree_read_time, start_time);
	spin_unlock(&b->c->btree_read_time_lock);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

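	/*
	 * If the node was redirtied while this write was in flight (keys
	 * were inserted into the other btree_write), schedule the next
	 * write rather than leaving it to sit dirty indefinitely.
	 */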
	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));

	closure_return(cl);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct bio_vec *bv;
	int n;

	__bio_for_each_segment(bv, b->bio, n, 0)
		__free_page(bv->bv_page);

	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io.cl);

	if (error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io.cl;
	struct bset *i = b->sets[b->nsets].data;
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= &b->io.cl;
	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c);
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		__btree_node_write_done(cl);
	}
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = b->sets[b->nsets].data;

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(b->sets->data->seq != i->seq);
	bch_check_key_order(b, i);

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	closure_lock(&b->io, parent ?: &b->c->cl);

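	/*
	 * Clear dirty and flip write_idx before starting the IO, so inserts
	 * racing with this write land in the other btree_write and simply
	 * redirty the node.
	 */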
	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	b->written += set_blocks(i, b->c);
	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	bch_btree_sort_lazy(b);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	rw_lock(true, b, b->level);

	if (btree_node_dirty(b))
		bch_btree_node_write(b, NULL);
	rw_unlock(true, b);
}

static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
{
	struct bset *i = b->sets[b->nsets].data;
	struct btree_write *w = btree_current_write(b);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (op && op->journal) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w, op)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = op->journal;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

static void mca_reinit(struct btree *b)
{
	unsigned i;

	b->flags	= 0;
	b->written	= 0;
	b->nsets	= 0;

	for (i = 0; i < MAX_BSETS; i++)
		b->sets[i].size = 0;
	/*
	 * Second loop starts at 1 because b->sets[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->sets[i].data = NULL;
}

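/*
 * The reserve scales with btree depth: in the worst case an insert splits
 * every node from a leaf up to the root, and each split needs freshly
 * allocated nodes to succeed.
 */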
#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	struct bset_tree *t = b->sets;
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
	list_move(&b->list, &b->c->btree_cache_freed);
	b->c->bucket_cache_used--;
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	struct bset_tree *t = b->sets;
	BUG_ON(t->data);

	b->page_order = max_t(unsigned,
			      ilog2(b->c->btree_pages),
			      btree_order(k));

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	list_move(&b->list, &b->c->btree_cache);
	b->c->bucket_cache_used++;
	return;
err:
	mca_data_free(b);
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	closure_init_unlocked(&b->io);

	mca_data_alloc(b, k, gfp);
	return b;
}

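/*
 * Try to reclaim a btree node for reuse: take the write lock without
 * blocking, start a write if the node is dirty (when given a closure to
 * wait on), and bail with -EAGAIN while IO is still outstanding.
 */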
static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
{
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	if (b->page_order < min_order) {
		rw_unlock(true, b);
		return -ENOMEM;
	}

	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

	if (cl && btree_node_dirty(b))
		bch_btree_node_write(b, NULL);

	if (cl)
		closure_wait_event_async(&b->io.wait, cl,
			 atomic_read(&b->io.cl.remaining) == -1);

	if (btree_node_dirty(b) ||
	    !closure_is_unlocked(&b->io.cl) ||
	    work_pending(&b->work.work)) {
		rw_unlock(true, b);
		return -EAGAIN;
	}

	return 0;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->try_harder)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

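	/*
	 * First pass: nodes on the freeable list have already had their
	 * buckets freed and are no longer in the hash table, so their
	 * memory can be released without further checks.
	 */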
	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, NULL, 0)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	/*
	 * Can happen right when we first start up, before we've read in any
	 * btree nodes
	 */
	if (list_empty(&c->btree_cache))
		goto out;

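	/*
	 * Second pass is a clock-style sweep of the live cache: rotate the
	 * list, evict nodes whose accessed bit is clear, and clear the bit
	 * on the rest so they're candidates next time around.
	 */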
	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, NULL, 0)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->try_harder)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	/* XXX: doesn't check for errors */

	closure_init_unlocked(&c->gc);

	for (i = 0; i < mca_reserve(c); i++)
		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->sets[0].data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
				     int level, struct closure *cl)
{
	int ret = -ENOMEM;
	struct btree *i;

	trace_bcache_btree_cache_cannibalize(c);

	if (!cl)
		return ERR_PTR(-ENOMEM);

	/*
	 * Trying to free up some memory - i.e. reuse some btree nodes - may
	 * require initiating IO to flush the dirty part of the node. If we're
	 * running under generic_make_request(), that IO will never finish and
	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to
	 * punt to workqueue and retry.
	 */
	if (current->bio_list)
		return ERR_PTR(-EAGAIN);

	if (c->try_harder && c->try_harder != cl) {
		closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
		return ERR_PTR(-EAGAIN);
	}

	c->try_harder = cl;
	c->try_harder_start = local_clock();
retry:
	list_for_each_entry_reverse(i, &c->btree_cache, list) {
		int r = mca_reap(i, cl, btree_order(k));
		if (!r)
			return i;
		if (r != -ENOMEM)
			ret = r;
	}

	if (ret == -EAGAIN &&
	    closure_blocking(cl)) {
		mutex_unlock(&c->bucket_lock);
		closure_sync(cl);
		mutex_lock(&c->bucket_lock);
		goto retry;
	}

	return ERR_PTR(ret);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
{
	if (c->try_harder == cl) {
		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
		__closure_wake_up(&c->try_wait);
	}
}

static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
			       int level, struct closure *cl)
{
	struct btree *b;

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_node_free() doesn't free memory; it sticks the node on the
	 * end of the list. Check if there are any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, NULL, btree_order(k)))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, NULL, 0)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->sets->data)
		goto err;
out:
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->level	= level;
	b->parent	= (void *) ~0UL;

	mca_reinit(b);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, k, level, cl);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
 * if that closure is in non blocking mode, will return -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
				 int level, struct btree_op *op)
{
	int i = 0;
	bool write = level <= op->lock;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, k, level, &op->cl);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->accessed = 1;

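	/* Prefetch the lookup tables and key data for the sets we'll search */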
	for (; i <= b->nsets && b->sets[i].size; i++) {
		prefetch(b->sets[i].tree);
		prefetch(b->sets[i].data);
	}

	for (; i <= b->nsets; i++)
		prefetch(b->sets[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	b = mca_alloc(c, k, level, NULL);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b, struct btree_op *op)
{
	unsigned i;

	trace_bcache_btree_node_free(b);

	/*
	 * The BUG_ON() in btree_node_get() implies that we must have a write
	 * lock on parent to free or even invalidate a node
	 */
	BUG_ON(op->lock <= b->level);
	BUG_ON(b == b->c->root);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);

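	/*
	 * Bumping the bucket gens invalidates any pointers to this node
	 * still in the btree; ptr_bad() will see them as stale.
	 */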
	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));

		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
			    PTR_BUCKET(b->c, &b->key, i));
	}

	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
				   struct closure *cl)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
		goto err;

	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level, cl);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		__bkey_put(c, &k.key);
		goto retry;
	}

	b->accessed = 1;
	bch_bset_init_next(b);

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
	__bkey_put(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(b);
	return b;
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct closure *cl)
{
	struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
	if (!IS_ERR_OR_NULL(n))
		bch_btree_sort_into(b, n);

	return n;
}

/* Garbage collection */

uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
			g->gc_gen = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     (1 << 14) - 1));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

static int btree_gc_mark_node(struct btree *b, unsigned *keys,
			      struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned last_dev = -1;
	struct bcache_device *d = NULL;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		if (last_dev != KEY_INODE(k)) {
			last_dev = KEY_INODE(k);

			d = KEY_INODE(k) < b->c->nr_uuids
				? b->c->devices[last_dev]
				: NULL;
		}

		stale = max(stale, btree_mark_key(b, k));

		if (bch_ptr_bad(b, k))
			continue;

		*keys += bkey_u64s(k);

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;

		gc->data += KEY_SIZE(k);
		if (KEY_DIRTY(k))
			gc->dirty += KEY_SIZE(k);
	}

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(b, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	return stale;
}

static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
				    struct btree_op *op)
{
	/*
	 * We block priorities from being written for the duration of garbage
	 * collection, so we can't sleep in btree_alloc() ->
	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
	 * our closure.
	 */
	struct btree *n = btree_node_alloc_replacement(b, NULL);

	if (!IS_ERR_OR_NULL(n)) {
		swap(b, n);
		__bkey_put(b->c, &b->key);

		memcpy(k->ptr, b->key.ptr,
		       sizeof(uint64_t) * KEY_PTRS(&b->key));

		btree_node_free(n, op);
		up_write(&n->lock);
	}

	return b;
}

/*
 * Leaving this at 2 until we've got incremental garbage collection done; it
 * could be higher (and has been tested with 4) except that garbage collection
 * could take much longer, adversely affecting latency.
 */
#define GC_MERGE_NODES	2U

struct gc_merge_info {
	struct btree	*b;
	struct bkey	*k;
	unsigned	keys;
};

static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
			      struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned nodes = 0, keys = 0, blocks;
	int i;

	while (nodes < GC_MERGE_NODES && r[nodes].b)
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
		return;

	for (i = nodes - 1; i >= 0; --i) {
		if (r[i].b->written)
			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);

		if (r[i].b->written)
			return;
	}

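	/*
	 * Merge right to left: repeatedly move keys from node i - 1 into
	 * node i until node i is full, so that node 0 ends up empty and
	 * can be freed below.
	 */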
	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = r[i].b->sets->data;
		struct bset *n2 = r[i - 1].b->sets->data;
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i == 1) {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + r->keys,
					 b->c) > btree_blocks(r[i].b))
				return;

			keys = n2->keys;
			last = &r->b->key;
		} else
			for (k = n2->start;
			     k < end(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k), b->c) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}

		BUG_ON(__set_blocks(n1, n1->keys + keys,
				    b->c) > btree_blocks(r[i].b));

		if (last) {
			bkey_copy_key(&r[i].b->key, last);
			bkey_copy_key(r[i].k, last);
		}

		memcpy(end(n1),
		       n2->start,
		       (void *) node(n2, keys) - (void *) n2->start);

		n1->keys += keys;

		memmove(n2->start,
			node(n2, keys),
			(void *) end(n2) - (void *) node(n2, keys));

		n2->keys -= keys;

		r[i].keys	= n1->keys;
		r[i - 1].keys	= n2->keys;
	}

	btree_node_free(r->b, op);
	up_write(&r->b->lock);

	trace_bcache_btree_gc_coalesce(nodes);

	gc->nodes--;
	nodes--;

	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
	memset(&r[nodes], 0, sizeof(struct gc_merge_info));
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
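	/*
	 * Nodes gc allocated itself must be written before gc finishes, so
	 * they complete against op->cl; nodes that were merely dirtied can
	 * finish against the shared writes closure instead.
	 */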
	void write(struct btree *r)
	{
		if (!r->written)
			bch_btree_node_write(r, &op->cl);
		else if (btree_node_dirty(r))
			bch_btree_node_write(r, writes);

		up_write(&r->lock);
	}

	int ret = 0, stale;
	unsigned i;
	struct gc_merge_info r[GC_MERGE_NODES];

	memset(r, 0, sizeof(r));

	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);

		if (IS_ERR(r->b)) {
			ret = PTR_ERR(r->b);
			break;
		}

		r->keys	= 0;
		stale = btree_gc_mark_node(r->b, &r->keys, gc);

		if (!b->written &&
		    (r->b->level || stale > 10 ||
		     b->c->gc_always_rewrite))
			r->b = btree_gc_alloc(r->b, r->k, op);

		if (r->b->level)
			ret = btree_gc_recurse(r->b, op, writes, gc);

		if (ret) {
			write(r->b);
			break;
		}

		bkey_copy_key(&b->c->gc_done, r->k);

		if (!b->written)
			btree_gc_coalesce(b, op, gc, r);

		if (r[GC_MERGE_NODES - 1].b)
			write(r[GC_MERGE_NODES - 1].b);

		memmove(&r[1], &r[0],
			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));

		/* When we've got incremental GC working, we'll want to do
		 * if (should_resched())
		 *	return -EAGAIN;
		 */
		cond_resched();
#if 0
		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
#endif
	}

	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
		write(r[i].b);

	/* Might have freed some children, must remove their keys */
	if (!b->written)
		bch_btree_sort(b);

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	unsigned keys = 0;
	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);

	if (b->level || stale > 10)
		n = btree_node_alloc_replacement(b, NULL);

	if (!IS_ERR_OR_NULL(n))
		swap(b, n);

	if (b->level)
		ret = btree_gc_recurse(b, op, writes, gc);

	if (!b->written || btree_node_dirty(b)) {
		bch_btree_node_write(b, n ? &op->cl : NULL);
	}

	if (!IS_ERR_OR_NULL(n)) {
		closure_sync(&op->cl);
		bch_btree_set_root(b);
		btree_node_free(n, op);
		rw_unlock(true, b);
	}

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

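	/* Journal and prio buckets are metadata too - don't reclaim them */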
	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc	= b->gc_gen;
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}

static void bch_btree_gc(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	btree_gc_start(c);

	atomic_inc(&c->prio_blocked);

	ret = btree_root(gc_root, c, &op, &writes, &stats);
	closure_sync(&op.cl);
	closure_sync(&writes);

	if (ret) {
		pr_warn("gc failed!");
		continue_at(cl, bch_btree_gc, bch_gc_wq);
	}

	/* Possibly wait for new UUIDs or whatever to hit disk */
	bch_journal_meta(c, &op.cl);
	closure_sync(&op.cl);

	available = bch_btree_gc_finish(c);

	atomic_dec(&c->prio_blocked);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.dirty	<<= 9;
	stats.data	<<= 9;
	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	continue_at(cl, bch_moving_gc, bch_gc_wq);
}

void bch_queue_gc(struct cache_set *c)
{
	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
				   unsigned long **seen)
{
	int ret;
	unsigned i;
	struct bkey *k;
	struct bucket *g;
	struct btree_iter iter;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		for (i = 0; i < KEY_PTRS(k); i++) {
			if (!ptr_available(b->c, k, i))
				continue;

			g = PTR_BUCKET(b->c, k, i);

			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
						seen[PTR_DEV(k, i)]) ||
			    !ptr_stale(b->c, k, i)) {
				g->gen = PTR_GEN(k, i);

				if (b->level)
					g->prio = BTREE_PRIO;
				else if (g->prio == BTREE_PRIO)
					g->prio = INITIAL_PRIO;
			}
		}

		btree_mark_key(b, k);
	}

	if (b->level) {
		k = bch_next_recurse_key(b, &ZERO_KEY);

		while (k) {
			struct bkey *p = bch_next_recurse_key(b, k);
			if (p)
				btree_node_prefetch(b->c, p, b->level - 1);

			ret = btree(check_recurse, k, b, op, seen);
			if (ret)
				return ret;

			k = p;
		}
	}

	return 0;
}

int bch_btree_check(struct cache_set *c, struct btree_op *op)
{
	int ret = -ENOMEM;
	unsigned i;
	unsigned long *seen[MAX_CACHES_PER_SET];

	memset(seen, 0, sizeof(seen));

	for (i = 0; c->cache[i]; i++) {
		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
		seen[i] = kmalloc(n, GFP_KERNEL);
		if (!seen[i])
			goto err;

		/* Disables the seen array until prio_read() uses it too */
		memset(seen[i], 0xFF, n);
	}

	ret = btree_root(check_recurse, c, op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
		kfree(seen[i]);
	return ret;
}

/* Btree insertion */

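/*
 * Make room for a new key at @where by sliding everything from there to
 * the end of the bset up, then fix the lookup table for the keys that
 * moved.
 */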
static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
{
	struct bset *i = b->sets[b->nsets].data;

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) end(i) - (void *) where);

	i->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, where);
}

static bool fix_overlapping_extents(struct btree *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct btree_op *op)
{
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     offset, -sectors);
	}

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k ||
		    bkey_cmp(&START_KEY(k), insert) >= 0)
			break;

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for BTREE_REPLACE
		 * operations.
		 */

		if (op->type == BTREE_REPLACE &&
		    KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(&op->replace);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(&op->replace) ||
			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(&op->replace));

			for (i = 0; i < KEY_PTRS(&op->replace); i++)
				if (k->ptr[i] != op->replace.ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, &b->sets[b->nsets],
						      insert);
				shift_keys(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				shift_keys(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			return false;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (op->type == BTREE_REPLACE) {
		if (!sectors_found) {
			op->insert_collision = true;
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

	return false;
}

1789static bool btree_insert_key(struct btree *b, struct btree_op *op,
1790 struct bkey *k)
1791{
1792 struct bset *i = b->sets[b->nsets].data;
1793 struct bkey *m, *prev;
Kent Overstreet85b14922013-05-14 20:33:16 -07001794 unsigned status = BTREE_INSERT_STATUS_INSERT;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001795
1796 BUG_ON(bkey_cmp(k, &b->key) > 0);
1797 BUG_ON(b->level && !KEY_PTRS(k));
1798 BUG_ON(!b->level && !KEY_OFFSET(k));
1799
1800 if (!b->level) {
1801 struct btree_iter iter;
1802 struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
1803
1804 /*
1805 * bset_search() returns the first key that is strictly greater
1806 * than the search key - but for back merging, we want to find
1807 * the first key that is greater than or equal to KEY_START(k) -
1808 * unless KEY_START(k) is 0.
1809 */
1810 if (KEY_OFFSET(&search))
1811 SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
1812
1813 prev = NULL;
1814 m = bch_btree_iter_init(b, &iter, &search);
1815
1816 if (fix_overlapping_extents(b, k, &iter, op))
1817 return false;
1818
Kent Overstreet1fa84552013-11-10 21:55:27 -08001819 if (KEY_DIRTY(k))
1820 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1821 KEY_START(k), KEY_SIZE(k));
1822
Kent Overstreetcafe5632013-03-23 16:11:31 -07001823 while (m != end(i) &&
1824 bkey_cmp(k, &START_KEY(m)) > 0)
1825 prev = m, m = bkey_next(m);
1826
1827 if (key_merging_disabled(b->c))
1828 goto insert;
1829
1830 /* prev is in the tree, if we merge we're done */
Kent Overstreet85b14922013-05-14 20:33:16 -07001831 status = BTREE_INSERT_STATUS_BACK_MERGE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001832 if (prev &&
1833 bch_bkey_try_merge(b, prev, k))
1834 goto merged;
1835
Kent Overstreet85b14922013-05-14 20:33:16 -07001836 status = BTREE_INSERT_STATUS_OVERWROTE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001837 if (m != end(i) &&
1838 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
1839 goto copy;
1840
Kent Overstreet85b14922013-05-14 20:33:16 -07001841 status = BTREE_INSERT_STATUS_FRONT_MERGE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001842 if (m != end(i) &&
1843 bch_bkey_try_merge(b, k, m))
1844 goto copy;
1845 } else
1846 m = bch_bset_search(b, &b->sets[b->nsets], k);
1847
1848insert: shift_keys(b, m, k);
1849copy: bkey_copy(m, k);
1850merged:
Kent Overstreet85b14922013-05-14 20:33:16 -07001851 bch_check_keys(b, "%u for %s", status, op_type(op));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001852
1853 if (b->level && !KEY_OFFSET(k))
Kent Overstreet57943512013-04-25 13:58:35 -07001854 btree_current_write(b)->prio_blocked++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001855
Kent Overstreet85b14922013-05-14 20:33:16 -07001856 trace_bcache_btree_insert_key(b, k, op->type, status);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001857
1858 return true;
1859}
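/*
 * To summarize the paths through btree_insert_key() for leaf nodes: a back
 * merge extends the preceding key in place, an "overwrote" hit reuses an
 * existing zero-size key with the same number of pointers, a front merge folds
 * the following key into k before copying, and everything else is a plain
 * shift_keys() insert.  Interior nodes skip all of this and insert at the
 * position bch_bset_search() returns.
 */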
1860
Kent Overstreet26c949f2013-09-10 18:41:15 -07001861static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1862 struct keylist *insert_keys)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001863{
1864 bool ret = false;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001865 unsigned oldsize = bch_count_data(b);
1866
Kent Overstreet26c949f2013-09-10 18:41:15 -07001867 while (!bch_keylist_empty(insert_keys)) {
Kent Overstreet403b6cd2013-07-24 17:22:44 -07001868 struct bset *i = write_block(b);
Kent Overstreet26c949f2013-09-10 18:41:15 -07001869 struct bkey *k = insert_keys->bottom;
1870
Kent Overstreet403b6cd2013-07-24 17:22:44 -07001871 if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
1872 > btree_blocks(b))
1873 break;
1874
1875 if (bkey_cmp(k, &b->key) <= 0) {
Kent Overstreet26c949f2013-09-10 18:41:15 -07001876 bkey_put(b->c, k, b->level);
1877
1878 ret |= btree_insert_key(b, op, k);
1879 bch_keylist_pop_front(insert_keys);
1880 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1881#if 0
1882 if (op->type == BTREE_REPLACE) {
1883 bkey_put(b->c, k, b->level);
1884 bch_keylist_pop_front(insert_keys);
1885 op->insert_collision = true;
1886 break;
1887 }
1888#endif
1889 BKEY_PADDED(key) temp;
1890 bkey_copy(&temp.key, insert_keys->bottom);
1891
1892 bch_cut_back(&b->key, &temp.key);
1893 bch_cut_front(&b->key, insert_keys->bottom);
1894
1895 ret |= btree_insert_key(b, op, &temp.key);
1896 break;
1897 } else {
1898 break;
1899 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001900 }
1901
Kent Overstreet403b6cd2013-07-24 17:22:44 -07001902 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1903
Kent Overstreetcafe5632013-03-23 16:11:31 -07001904 BUG_ON(bch_count_data(b) < oldsize);
1905 return ret;
1906}
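/*
 * Note on the loop above: insertion stops early when the current write block
 * would overflow, and a key that straddles b->key is split - the portion up to
 * b->key is inserted here (via the temp copy and bch_cut_back()) while
 * bch_cut_front() leaves the remainder on the keylist for the next node.
 * Illustrative numbers: if b->key ends at offset 1000 and the bottom key
 * covers 900..1100, then 900..1000 goes into this node and 1000..1100 stays
 * queued.
 */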
1907
Kent Overstreet26c949f2013-09-10 18:41:15 -07001908static int btree_split(struct btree *b, struct btree_op *op,
1909 struct keylist *insert_keys,
1910 struct keylist *parent_keys)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001911{
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07001912 bool split;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001913 struct btree *n1, *n2 = NULL, *n3 = NULL;
1914 uint64_t start_time = local_clock();
1915
1916 if (b->level)
1917 set_closure_blocking(&op->cl);
1918
1919 n1 = btree_node_alloc_replacement(b, &op->cl);
1920 if (IS_ERR(n1))
1921 goto err;
1922
1923 split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
1924
Kent Overstreetcafe5632013-03-23 16:11:31 -07001925 if (split) {
1926 unsigned keys = 0;
1927
Kent Overstreetc37511b2013-04-26 15:39:55 -07001928 trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
1929
Kent Overstreetcafe5632013-03-23 16:11:31 -07001930 n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
1931 if (IS_ERR(n2))
1932 goto err_free1;
1933
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07001934 if (!b->parent) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001935 n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
1936 if (IS_ERR(n3))
1937 goto err_free2;
1938 }
1939
Kent Overstreet26c949f2013-09-10 18:41:15 -07001940 bch_btree_insert_keys(n1, op, insert_keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001941
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07001942 /*
1943 * Has to be a linear search because we don't have an auxiliary
Kent Overstreetcafe5632013-03-23 16:11:31 -07001944 * search tree yet
1945 */
1946
1947 while (keys < (n1->sets[0].data->keys * 3) / 5)
1948 keys += bkey_u64s(node(n1->sets[0].data, keys));
1949
1950 bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
1951 keys += bkey_u64s(node(n1->sets[0].data, keys));
1952
1953 n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
1954 n1->sets[0].data->keys = keys;
1955
1956 memcpy(n2->sets[0].data->start,
1957 end(n1->sets[0].data),
1958 n2->sets[0].data->keys * sizeof(uint64_t));
1959
1960 bkey_copy_key(&n2->key, &b->key);
1961
Kent Overstreet26c949f2013-09-10 18:41:15 -07001962 bch_keylist_add(parent_keys, &n2->key);
Kent Overstreet57943512013-04-25 13:58:35 -07001963 bch_btree_node_write(n2, &op->cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001964 rw_unlock(true, n2);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001965 } else {
1966 trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
1967
Kent Overstreet26c949f2013-09-10 18:41:15 -07001968 bch_btree_insert_keys(n1, op, insert_keys);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001969 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001970
Kent Overstreet26c949f2013-09-10 18:41:15 -07001971 bch_keylist_add(parent_keys, &n1->key);
Kent Overstreet57943512013-04-25 13:58:35 -07001972 bch_btree_node_write(n1, &op->cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001973
1974 if (n3) {
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07001975 /* Depth increases, make a new root */
1976
Kent Overstreetcafe5632013-03-23 16:11:31 -07001977 bkey_copy_key(&n3->key, &MAX_KEY);
Kent Overstreet26c949f2013-09-10 18:41:15 -07001978 bch_btree_insert_keys(n3, op, parent_keys);
Kent Overstreet57943512013-04-25 13:58:35 -07001979 bch_btree_node_write(n3, &op->cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001980
1981 closure_sync(&op->cl);
1982 bch_btree_set_root(n3);
1983 rw_unlock(true, n3);
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07001984 } else if (!b->parent) {
1985 /* Root filled up but didn't need to be split */
1986
Kent Overstreet26c949f2013-09-10 18:41:15 -07001987 parent_keys->top = parent_keys->bottom;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001988 closure_sync(&op->cl);
1989 bch_btree_set_root(n1);
1990 } else {
1991 unsigned i;
1992
Kent Overstreet26c949f2013-09-10 18:41:15 -07001993 bkey_copy(parent_keys->top, &b->key);
1994 bkey_copy_key(parent_keys->top, &ZERO_KEY);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001995
1996 for (i = 0; i < KEY_PTRS(&b->key); i++) {
1997 uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
1998
Kent Overstreet26c949f2013-09-10 18:41:15 -07001999 SET_PTR_GEN(parent_keys->top, i, g);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002000 }
2001
Kent Overstreet26c949f2013-09-10 18:41:15 -07002002 bch_keylist_push(parent_keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002003 closure_sync(&op->cl);
2004 atomic_inc(&b->c->prio_blocked);
2005 }
2006
2007 rw_unlock(true, n1);
2008 btree_node_free(b, op);
2009
Kent Overstreet169ef1c2013-03-28 12:50:55 -06002010 bch_time_stats_update(&b->c->btree_split_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002011
2012 return 0;
2013err_free2:
2014 __bkey_put(n2->c, &n2->key);
2015 btree_node_free(n2, op);
2016 rw_unlock(true, n2);
2017err_free1:
2018 __bkey_put(n1->c, &n1->key);
2019 btree_node_free(n1, op);
2020 rw_unlock(true, n1);
2021err:
2022 if (n3 == ERR_PTR(-EAGAIN) ||
2023 n2 == ERR_PTR(-EAGAIN) ||
2024 n1 == ERR_PTR(-EAGAIN))
2025 return -EAGAIN;
2026
2027 pr_warn("couldn't split");
2028 return -ENOMEM;
2029}
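/*
 * Rough shape of btree_split() as implemented above: allocate a replacement
 * node n1; if it would end up more than ~4/5 full, also allocate n2 (and n3 as
 * a new root when b has no parent), insert the keys into n1, then linearly
 * walk roughly 3/5 of n1's keys (counted in u64s) to pick the split point and
 * move the tail into n2.  The keys for the new node(s) accumulate in
 * parent_keys; when b has a parent, a copy of b->key with its key zeroed out
 * and the pointer generations bumped is appended, so that once the parent is
 * written the old node's buckets can be reclaimed.
 */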
2030
Kent Overstreet26c949f2013-09-10 18:41:15 -07002031static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2032 struct keylist *insert_keys)
2033{
2034 int ret = 0;
2035 struct keylist split_keys;
2036
2037 bch_keylist_init(&split_keys);
2038
2039 BUG_ON(b->level);
2040
2041 do {
2042 if (should_split(b)) {
2043 if (current->bio_list) {
2044 op->lock = b->c->root->level + 1;
2045 ret = -EAGAIN;
2046 } else if (op->lock <= b->c->root->level) {
2047 op->lock = b->c->root->level + 1;
2048 ret = -EINTR;
2049 } else {
2050 struct btree *parent = b->parent;
2051
2052 ret = btree_split(b, op, insert_keys,
2053 &split_keys);
2054 insert_keys = &split_keys;
2055 b = parent;
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002056 if (!ret)
2057 ret = -EINTR;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002058 }
2059 } else {
2060 BUG_ON(write_block(b) != b->sets[b->nsets].data);
2061
2062 if (bch_btree_insert_keys(b, op, insert_keys)) {
2063 if (!b->level)
2064 bch_btree_leaf_dirty(b, op);
2065 else
2066 bch_btree_node_write(b, &op->cl);
2067 }
2068 }
2069 } while (!bch_keylist_empty(&split_keys));
2070
2071 return ret;
2072}
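/*
 * The retry loop above resolves splits bottom-up: when the node should split
 * but we are running under a bio_list (so we must not block), the caller gets
 * -EAGAIN; when we don't hold a deep enough lock, the lock level is raised and
 * -EINTR asks the caller to retry from the root; otherwise btree_split() runs
 * and any keys it generates for the parent are fed back through split_keys on
 * the next iteration, continuing at the parent node.
 */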
2073
Kent Overstreete7c590e2013-09-10 18:39:16 -07002074int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2075 struct bkey *check_key)
2076{
2077 int ret = -EINTR;
2078 uint64_t btree_ptr = b->key.ptr[0];
2079 unsigned long seq = b->seq;
2080 struct keylist insert;
2081 bool upgrade = op->lock == -1;
2082
2083 bch_keylist_init(&insert);
2084
2085 if (upgrade) {
2086 rw_unlock(false, b);
2087 rw_lock(true, b, b->level);
2088
2089 if (b->key.ptr[0] != btree_ptr ||
2090 b->seq != seq + 1)
2091 goto out;
2092 }
2093
2094 SET_KEY_PTRS(check_key, 1);
2095 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2096
2097 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2098
2099 bch_keylist_add(&insert, check_key);
2100
2101 BUG_ON(op->type != BTREE_INSERT);
2102
2103 ret = bch_btree_insert_node(b, op, &insert);
2104
2105 BUG_ON(!ret && !bch_keylist_empty(&insert));
2106out:
2107 if (upgrade)
2108 downgrade_write(&b->lock);
2109 return ret;
2110}
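/*
 * Note on the locking dance above: when called with only a read lock
 * (op->lock == -1) the node is re-locked for writing, and the pointer and
 * sequence number checks bail out if the node changed underneath us.  The
 * check key itself gets a single random "pointer" on PTR_CHECK_DEV, so it
 * occupies space in the btree without referencing any real bucket -
 * presumably so a later replace can detect data that raced in during a cache
 * miss.
 */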
2111
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002112static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
2113 struct keylist *keys)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002114{
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002115 if (bch_keylist_empty(keys))
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002116 return 0;
2117
Kent Overstreetcafe5632013-03-23 16:11:31 -07002118 if (b->level) {
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002119 struct bkey *k;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002120
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002121 k = bch_next_recurse_key(b, &START_KEY(keys->bottom));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002122 if (!k) {
2123 btree_bug(b, "no key to recurse on at level %i/%i",
2124 b->level, b->c->root->level);
2125
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002126 keys->top = keys->bottom;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002127 return -EIO;
2128 }
2129
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002130 return btree(insert_recurse, k, b, op, keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002131 } else {
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002132 return bch_btree_insert_node(b, op, keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002133 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002134}
2135
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002136int bch_btree_insert(struct btree_op *op, struct cache_set *c,
2137 struct keylist *keys)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002138{
2139 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002140
2141 /*
 2142	 * Don't want to block with the btree locked unless we have to;
2143 * otherwise we get deadlocks with try_harder and between split/gc
2144 */
2145 clear_closure_blocking(&op->cl);
2146
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002147 BUG_ON(bch_keylist_empty(keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002148
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002149 while (!bch_keylist_empty(keys)) {
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002150 op->lock = 0;
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002151 ret = btree_root(insert_recurse, c, op, keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002152
2153 if (ret == -EAGAIN) {
2154 ret = 0;
2155 closure_sync(&op->cl);
2156 } else if (ret) {
2157 struct bkey *k;
2158
2159 pr_err("error %i trying to insert key for %s",
2160 ret, op_type(op));
2161
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002162 while ((k = bch_keylist_pop(keys)))
Kent Overstreetcafe5632013-03-23 16:11:31 -07002163 bkey_put(c, k, 0);
2164 }
2165 }
2166
Kent Overstreetcafe5632013-03-23 16:11:31 -07002167 if (op->journal)
2168 atomic_dec_bug(op->journal);
2169 op->journal = NULL;
2170 return ret;
2171}
2172
2173void bch_btree_set_root(struct btree *b)
2174{
2175 unsigned i;
Kent Overstreete49c7c32013-06-26 17:25:38 -07002176 struct closure cl;
2177
2178 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002179
Kent Overstreetc37511b2013-04-26 15:39:55 -07002180 trace_bcache_btree_set_root(b);
2181
Kent Overstreetcafe5632013-03-23 16:11:31 -07002182 BUG_ON(!b->written);
2183
2184 for (i = 0; i < KEY_PTRS(&b->key); i++)
2185 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2186
2187 mutex_lock(&b->c->bucket_lock);
2188 list_del_init(&b->list);
2189 mutex_unlock(&b->c->bucket_lock);
2190
2191 b->c->root = b;
2192 __bkey_put(b->c, &b->key);
2193
Kent Overstreete49c7c32013-06-26 17:25:38 -07002194 bch_journal_meta(b->c, &cl);
2195 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002196}
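/*
 * As the checks above show, a node only becomes the root if it has already
 * been written and all of its buckets are still at BTREE_PRIO; it is removed
 * from the LRU list, and the root change is made persistent with a journal
 * write before returning.
 */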
2197
2198/* Cache lookup */
2199
2200static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
2201 struct bkey *k)
2202{
2203 struct search *s = container_of(op, struct search, op);
2204 struct bio *bio = &s->bio.bio;
2205 int ret = 0;
2206
2207 while (!ret &&
2208 !op->lookup_done) {
2209 unsigned sectors = INT_MAX;
2210
2211 if (KEY_INODE(k) == op->inode) {
2212 if (KEY_START(k) <= bio->bi_sector)
2213 break;
2214
2215 sectors = min_t(uint64_t, sectors,
2216 KEY_START(k) - bio->bi_sector);
2217 }
2218
2219 ret = s->d->cache_miss(b, s, bio, sectors);
2220 }
2221
2222 return ret;
2223}
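/*
 * Illustrative example for the hole-filling loop above: if the bio is at
 * sector 100 and the next cached key for this inode starts at sector 120,
 * cache_miss() is asked for the 20 uncached sectors in between; if the key
 * belongs to another inode (or there is no key at all), sectors stays at
 * INT_MAX and the rest of the bio is treated as a miss.
 */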
2224
2225/*
2226 * Read from a single key, handling the initial cache miss if the key starts in
2227 * the middle of the bio
2228 */
2229static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
2230 struct bkey *k)
2231{
2232 struct search *s = container_of(op, struct search, op);
2233 struct bio *bio = &s->bio.bio;
2234 unsigned ptr;
2235 struct bio *n;
2236
2237 int ret = submit_partial_cache_miss(b, op, k);
2238 if (ret || op->lookup_done)
2239 return ret;
2240
2241 /* XXX: figure out best pointer - for multiple cache devices */
2242 ptr = 0;
2243
2244 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
2245
2246 while (!op->lookup_done &&
2247 KEY_INODE(k) == op->inode &&
2248 bio->bi_sector < KEY_OFFSET(k)) {
2249 struct bkey *bio_key;
2250 sector_t sector = PTR_OFFSET(k, ptr) +
2251 (bio->bi_sector - KEY_START(k));
2252 unsigned sectors = min_t(uint64_t, INT_MAX,
2253 KEY_OFFSET(k) - bio->bi_sector);
2254
2255 n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002256 if (n == bio)
2257 op->lookup_done = true;
2258
2259 bio_key = &container_of(n, struct bbio, bio)->key;
2260
2261 /*
2262 * The bucket we're reading from might be reused while our bio
2263 * is in flight, and we could then end up reading the wrong
2264 * data.
2265 *
2266 * We guard against this by checking (in cache_read_endio()) if
2267 * the pointer is stale again; if so, we treat it as an error
2268 * and reread from the backing device (but we don't pass that
2269 * error up anywhere).
2270 */
2271
2272 bch_bkey_copy_single_ptr(bio_key, k, ptr);
2273 SET_PTR_OFFSET(bio_key, 0, sector);
2274
2275 n->bi_end_io = bch_cache_read_endio;
2276 n->bi_private = &s->cl;
2277
Kent Overstreetcafe5632013-03-23 16:11:31 -07002278 __bch_submit_bbio(n, b->c);
2279 }
2280
2281 return 0;
2282}
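/*
 * Illustrative mapping for the loop above: with a key covering backing
 * sectors 100..200 whose pointer places it at cache offset 5000, a bio
 * positioned at sector 130 is split off and redirected to cache sector 5030
 * (PTR_OFFSET + (bi_sector - KEY_START)), for at most
 * KEY_OFFSET - bi_sector = 70 sectors.
 */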
2283
2284int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
2285{
2286 struct search *s = container_of(op, struct search, op);
2287 struct bio *bio = &s->bio.bio;
2288
2289 int ret = 0;
2290 struct bkey *k;
2291 struct btree_iter iter;
2292 bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
2293
Kent Overstreetcafe5632013-03-23 16:11:31 -07002294 do {
2295 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
2296 if (!k) {
2297 /*
2298 * b->key would be exactly what we want, except that
2299 * pointers to btree nodes have nonzero size - we
2300 * wouldn't go far enough
2301 */
2302
2303 ret = submit_partial_cache_miss(b, op,
2304 &KEY(KEY_INODE(&b->key),
2305 KEY_OFFSET(&b->key), 0));
2306 break;
2307 }
2308
2309 ret = b->level
2310 ? btree(search_recurse, k, b, op)
2311 : submit_partial_cache_hit(b, op, k);
2312 } while (!ret &&
2313 !op->lookup_done);
2314
2315 return ret;
2316}
2317
2318/* Keybuf code */
2319
2320static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2321{
2322 /* Overlapping keys compare equal */
2323 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2324 return -1;
2325 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2326 return 1;
2327 return 0;
2328}
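/*
 * Because any two overlapping keys compare equal here, RB_INSERT() in
 * bch_btree_refill_keybuf() reports a collision for an overlapping candidate
 * and the caller frees it instead of queueing it twice.
 */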
2329
2330static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2331 struct keybuf_key *r)
2332{
2333 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2334}
2335
2336static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
Kent Overstreet72c27062013-06-05 06:24:39 -07002337 struct keybuf *buf, struct bkey *end,
2338 keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002339{
2340 struct btree_iter iter;
2341 bch_btree_iter_init(b, &iter, &buf->last_scanned);
2342
2343 while (!array_freelist_empty(&buf->freelist)) {
2344 struct bkey *k = bch_btree_iter_next_filter(&iter, b,
2345 bch_ptr_bad);
2346
2347 if (!b->level) {
2348 if (!k) {
2349 buf->last_scanned = b->key;
2350 break;
2351 }
2352
2353 buf->last_scanned = *k;
2354 if (bkey_cmp(&buf->last_scanned, end) >= 0)
2355 break;
2356
Kent Overstreet72c27062013-06-05 06:24:39 -07002357 if (pred(buf, k)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07002358 struct keybuf_key *w;
2359
Kent Overstreetcafe5632013-03-23 16:11:31 -07002360 spin_lock(&buf->lock);
2361
2362 w = array_alloc(&buf->freelist);
2363
2364 w->private = NULL;
2365 bkey_copy(&w->key, k);
2366
2367 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2368 array_free(&buf->freelist, w);
2369
2370 spin_unlock(&buf->lock);
2371 }
2372 } else {
2373 if (!k)
2374 break;
2375
Kent Overstreet72c27062013-06-05 06:24:39 -07002376 btree(refill_keybuf, k, b, op, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002377 /*
2378 * Might get an error here, but can't really do anything
2379 * and it'll get logged elsewhere. Just read what we
2380 * can.
2381 */
2382
2383 if (bkey_cmp(&buf->last_scanned, end) >= 0)
2384 break;
2385
2386 cond_resched();
2387 }
2388 }
2389
2390 return 0;
2391}
2392
2393void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
Kent Overstreet72c27062013-06-05 06:24:39 -07002394 struct bkey *end, keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002395{
2396 struct bkey start = buf->last_scanned;
2397 struct btree_op op;
2398 bch_btree_op_init_stack(&op);
2399
2400 cond_resched();
2401
Kent Overstreet72c27062013-06-05 06:24:39 -07002402 btree_root(refill_keybuf, c, &op, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002403 closure_sync(&op.cl);
2404
2405 pr_debug("found %s keys from %llu:%llu to %llu:%llu",
2406 RB_EMPTY_ROOT(&buf->keys) ? "no" :
2407 array_freelist_empty(&buf->freelist) ? "some" : "a few",
2408 KEY_INODE(&start), KEY_OFFSET(&start),
2409 KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
2410
2411 spin_lock(&buf->lock);
2412
2413 if (!RB_EMPTY_ROOT(&buf->keys)) {
2414 struct keybuf_key *w;
2415 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2416 buf->start = START_KEY(&w->key);
2417
2418 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2419 buf->end = w->key;
2420 } else {
2421 buf->start = MAX_KEY;
2422 buf->end = MAX_KEY;
2423 }
2424
2425 spin_unlock(&buf->lock);
2426}
2427
2428static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2429{
2430 rb_erase(&w->node, &buf->keys);
2431 array_free(&buf->freelist, w);
2432}
2433
2434void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2435{
2436 spin_lock(&buf->lock);
2437 __bch_keybuf_del(buf, w);
2438 spin_unlock(&buf->lock);
2439}
2440
2441bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2442 struct bkey *end)
2443{
2444 bool ret = false;
2445 struct keybuf_key *p, *w, s;
2446 s.key = *start;
2447
2448 if (bkey_cmp(end, &buf->start) <= 0 ||
2449 bkey_cmp(start, &buf->end) >= 0)
2450 return false;
2451
2452 spin_lock(&buf->lock);
2453 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2454
2455 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2456 p = w;
2457 w = RB_NEXT(w, node);
2458
2459 if (p->private)
2460 ret = true;
2461 else
2462 __bch_keybuf_del(buf, p);
2463 }
2464
2465 spin_unlock(&buf->lock);
2466 return ret;
2467}
2468
2469struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2470{
2471 struct keybuf_key *w;
2472 spin_lock(&buf->lock);
2473
2474 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2475
2476 while (w && w->private)
2477 w = RB_NEXT(w, node);
2478
2479 if (w)
2480 w->private = ERR_PTR(-EINTR);
2481
2482 spin_unlock(&buf->lock);
2483 return w;
2484}
2485
2486struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2487 struct keybuf *buf,
Kent Overstreet72c27062013-06-05 06:24:39 -07002488 struct bkey *end,
2489 keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002490{
2491 struct keybuf_key *ret;
2492
2493 while (1) {
2494 ret = bch_keybuf_next(buf);
2495 if (ret)
2496 break;
2497
2498 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2499 pr_debug("scan finished");
2500 break;
2501 }
2502
Kent Overstreet72c27062013-06-05 06:24:39 -07002503 bch_refill_keybuf(c, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002504 }
2505
2506 return ret;
2507}
2508
Kent Overstreet72c27062013-06-05 06:24:39 -07002509void bch_keybuf_init(struct keybuf *buf)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002510{
Kent Overstreetcafe5632013-03-23 16:11:31 -07002511 buf->last_scanned = MAX_KEY;
2512 buf->keys = RB_ROOT;
2513
2514 spin_lock_init(&buf->lock);
2515 array_allocator_init(&buf->freelist);
2516}
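/*
 * A minimal sketch of how a consumer might drive the keybuf API above
 * (hypothetical caller and helpers - the real users are the writeback and
 * moving gc code, which also reset last_scanned before the first pass):
 *
 *	struct keybuf buf;
 *	struct keybuf_key *w;
 *
 *	bch_keybuf_init(&buf);
 *
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY, pred))) {
 *		// returned keys are marked busy (private == ERR_PTR(-EINTR))
 *		do_io_for(w);			// hypothetical helper
 *		bch_keybuf_del(&buf, w);	// release when the io is done
 *	}
 */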
2517
2518void bch_btree_exit(void)
2519{
2520 if (btree_io_wq)
2521 destroy_workqueue(btree_io_wq);
2522 if (bch_gc_wq)
2523 destroy_workqueue(bch_gc_wq);
2524}
2525
2526int __init bch_btree_init(void)
2527{
2528 if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
2529 !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
2530 return -ENOMEM;
2531
2532 return 0;
2533}