/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Make sure all allocations get charged to the root cgroup
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Also lookup by cgroup in get_open_bucket()
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

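/*
 * Heuristic, from the checks below: a node wants splitting once all of its
 * blocks have been written, or once the unwritten bset plus a little slack
 * (the "+ 15" keys) would no longer fit in the node.
 */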
static inline bool should_split(struct btree *b)
{
	struct bset *i = write_block(b);
	return b->written >= btree_blocks(b) ||
	       (b->written + __set_blocks(i, i->keys + 15, b->c)
		> btree_blocks(b));
}

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
	if (!IS_ERR(_child)) {						\
		_child->parent = (b);					\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the root node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_b->parent = NULL;				\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -ENOSPC) {					\
			wait_event((c)->try_wait,			\
				   !(c)->try_harder);			\
			_r = -EINTR;					\
		}							\
	} while (_r == -EINTR);						\
									\
	_r;								\
})

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

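/*
 * Checksums a bset, skipping the csum field itself (the first 8 bytes of the
 * bset) and seeding the crc with the node's first pointer - which, it seems,
 * ties the checksum to the bucket the bset is supposed to live in.
 */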
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = end(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

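/*
 * Validates each bset read off disk in order (version, header size, magic,
 * checksum), then merge sorts them all back into one consistent in-memory
 * set; on any failure the node is flagged with an IO error and a cache set
 * error is raised.
 */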
static void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = b->sets[0].data;
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->sets[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, end(i));

		b->written += set_blocks(i, b->c);
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     index(i, b) < btree_blocks(b);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->sets[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(b, iter);

	i = b->sets[0].data;
	err = "short btree key";
	if (b->sets[0].size &&
	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    index(i, b), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw	= REQ_META|READ_SYNC;
	bio->bi_size	= KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;

	bch_bio_map(bio, b->sets[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));

	closure_return(cl);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct bio_vec *bv;
	int n;

	__bio_for_each_segment(bv, b->bio, n, 0)
		__free_page(bv->bv_page);

	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io.cl);

	if (error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io.cl;
	struct bset *i = b->sets[b->nsets].data;
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c);
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		__btree_node_write_done(cl);
	}
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = b->sets[b->nsets].data;

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(b->sets->data->seq != i->seq);
	bch_check_keys(b, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	closure_lock(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	b->written += set_blocks(i, b->c);
	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	bch_btree_sort_lazy(b);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);
	bch_btree_node_write(b, &cl);
	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	rw_lock(true, b, b->level);

	if (btree_node_dirty(b))
		bch_btree_node_write(b, NULL);
	rw_unlock(true, b);
}

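/*
 * Marks a leaf dirty after an insertion, and pins the oldest journal entry
 * the node now depends on so the journal can't be reclaimed until this node
 * has been written. Sets that have grown too big are written out immediately.
 */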
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = b->sets[b->nsets].data;
	struct btree_write *w = btree_current_write(b);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

static void mca_reinit(struct btree *b)
{
	unsigned i;

	b->flags	= 0;
	b->written	= 0;
	b->nsets	= 0;

	for (i = 0; i < MAX_BSETS; i++)
		b->sets[i].size = 0;
	/*
	 * Second loop starts at 1 because b->sets[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->sets[i].data = NULL;
}

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	struct bset_tree *t = b->sets;
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
	list_move(&b->list, &b->c->btree_cache_freed);
	b->c->bucket_cache_used--;
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	struct bset_tree *t = b->sets;
	BUG_ON(t->data);

	b->page_order = max_t(unsigned,
			      ilog2(b->c->btree_pages),
			      btree_order(k));

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	list_move(&b->list, &b->c->btree_cache);
	b->c->bucket_cache_used++;
	return;
err:
	mca_data_free(b);
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	closure_init_unlocked(&b->io);

	mca_data_alloc(b, k, gfp);
	return b;
}

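/*
 * Attempts to take a node away from the btree node cache: fails with -ENOMEM
 * if the node is locked, too small (below min_order), or, when !flush, dirty
 * or mid-write. On success the node is returned write locked, with any dirty
 * contents written out and any in flight write completed.
 */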
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

	if (b->page_order < min_order ||
	    (!flush &&
	     (btree_node_dirty(b) ||
	      atomic_read(&b->io.cl.remaining) != -1))) {
		rw_unlock(true, b);
		return -ENOMEM;
	}

	if (btree_node_dirty(b))
		bch_btree_node_write_sync(b);

	/* wait for any in flight btree write */
	closure_wait_event(&b->io.wait, &cl,
			   atomic_read(&b->io.cl.remaining) == -1);

	return 0;
}

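/*
 * Shrinker callback: frees clean, unused cached btree nodes under memory
 * pressure, first from the freeable list and then by rotating through the
 * main cache - mca_can_free() caps the count so the mca_reserve() reserve
 * is never touched and btree inserts can keep making forward progress.
 */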
static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->try_harder)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	/*
	 * Can happen right when we first start up, before we've read in any
	 * btree nodes
	 */
	if (list_empty(&c->btree_cache))
		goto out;

	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->try_harder)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->sets[0].data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

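/*
 * Steals memory from another cached btree node when a normal allocation has
 * failed: a first pass that only takes clean nodes, then a second that is
 * allowed to flush. Only the current holder of try_harder may cannibalize;
 * anyone else gets -ENOSPC and must wait (see btree_root()).
 */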
static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (!c->try_harder) {
		c->try_harder = current;
		c->try_harder_start = local_clock();
	} else if (c->try_harder != current)
		return ERR_PTR(-ENOSPC);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex (try_harder) to ensure
 * that; mca_cannibalize() takes it. This means every time we unlock the root
 * of the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->try_harder == current) {
		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
		wake_up(&c->try_wait);
	}
}

static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->sets->data)
		goto err;
out:
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->level	= level;
	b->parent	= (void *) ~0UL;

	mca_reinit(b);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
				 int level, bool write)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->accessed = 1;

	for (; i <= b->nsets && b->sets[i].size; i++) {
		prefetch(b->sets[i].tree);
		prefetch(b->sets[i].data);
	}

	for (; i <= b->nsets; i++)
		prefetch(b->sets[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	b = mca_alloc(c, k, level);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	unsigned i;

	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));

		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
			    PTR_BUCKET(b->c, &b->key, i));
	}

	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	bch_bset_init_next(b);

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(b);
	return b;
}

static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
	struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
	if (!IS_ERR_OR_NULL(n))
		bch_btree_sort_into(b, n);

	return n;
}

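/*
 * Builds the key that frees a replaced node: a copy of the node's key with a
 * zero search key and each pointer's gen bumped by one, which invalidates the
 * old pointers once the key has been inserted into the parent.
 */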
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++) {
		uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;

		SET_PTR_GEN(k, i, g);
	}

	atomic_inc(&b->c->prio_blocked);
}

/* Garbage collection */

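/*
 * Marks the buckets a key points into for garbage collection: updates each
 * bucket's gc_gen, records whether it holds metadata or dirty data, and
 * accumulates sectors used. Returns how stale the stalest pointer was.
 */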
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
			g->gc_gen = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     (1 << 14) - 1));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(b, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(b, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned	keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
				 struct keylist *, atomic_t *, struct bkey *);

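/*
 * Tries to merge a run of adjacent, mostly empty sibling nodes (up to
 * GC_MERGE_NODES of them) into fewer nodes during gc: keys are shifted
 * right-to-left through freshly allocated replacement nodes, the emptied
 * leftmost node is freed, and the parent is updated in a single insert.
 * Returns -EINTR on success - the caller's iterator is now invalid - and 0
 * when there was nothing worth coalescing.
 */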
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct keylist *keylist, struct gc_stat *gc,
			     struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct closure cl;
	struct bkey *k;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = new_nodes[i]->sets->data;
		struct bset *n2 = new_nodes[i - 1]->sets->data;
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < end(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k), b->c) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 b->c) > btree_blocks(new_nodes[i]))
				goto out_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys,
				    b->c) > btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(end(n1),
		       n2->start,
		       (void *) node(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			node(n2, keys),
			(void *) end(n2) - (void *) node(n2, keys));

		n2->keys -= keys;

		if (bch_keylist_realloc(keylist,
					KEY_PTRS(&new_nodes[i]->key), b->c))
			goto out_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++) {
		if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist->top);
		bch_keylist_push(keylist);
	}

	/* We emptied out this node */
	BUG_ON(new_nodes[0]->sets->data->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);

	closure_sync(&cl);

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	bch_btree_insert_node(b, op, keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(keylist));

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	/* Invalidated our iterator */
	return -EINTR;

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;

	for_each_key_filter(b, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

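/*
 * Walks one level of the btree for gc, keeping a sliding window of
 * GC_MERGE_NODES children so neighbours can be coalesced, rewriting nodes
 * that btree_gc_mark_node() says are worth rewriting, and recursing into
 * interior children. Progress is recorded in c->gc_done so an interrupted
 * pass (-EAGAIN/-EINTR) can resume where it left off.
 */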
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	unsigned i;
	int ret = 0;
	bool should_rewrite;
	struct btree *n;
	struct bkey *k;
	struct keylist keys;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *last = r + GC_MERGE_NODES - 1;

	bch_keylist_init(&keys);
	bch_btree_iter_init(b, &iter, &b->c->gc_done);

	for (i = 0; i < GC_MERGE_NODES; i++)
		r[i].b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, &keys, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				n = btree_node_alloc_replacement(last->b,
								 false);

				if (!IS_ERR_OR_NULL(n)) {
					bch_btree_node_write_sync(n);
					bch_keylist_add(&keys, &n->key);

					make_btree_freeing_key(last->b,
							       keys.top);
					bch_keylist_push(&keys);

					btree_node_free(last->b);

					bch_btree_insert_node(b, op, &keys,
							      NULL, NULL);
					BUG_ON(!bch_keylist_empty(&keys));

					rw_unlock(true, last->b);
					last->b = n;

					/* Invalidated our iterator */
					ret = -EINTR;
					break;
				}
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = 0; i < GC_MERGE_NODES; i++)
		if (!IS_ERR_OR_NULL(r[i].b)) {
			if (btree_node_dirty(r[i].b))
				bch_btree_node_write(r[i].b, writes);
			rw_unlock(true, r[i].b);
		}

	bch_keylist_free(&keys);

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, false);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);
			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc	= b->gc_gen;
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}

Kent Overstreet72a44512013-10-24 17:19:26 -07001615static void bch_btree_gc(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001616{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001617 int ret;
1618 unsigned long available;
1619 struct gc_stat stats;
1620 struct closure writes;
1621 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001622 uint64_t start_time = local_clock();
Kent Overstreet57943512013-04-25 13:58:35 -07001623
Kent Overstreetc37511b2013-04-26 15:39:55 -07001624 trace_bcache_gc_start(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001625
1626 memset(&stats, 0, sizeof(struct gc_stat));
1627 closure_init_stack(&writes);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001628 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001629
1630 btree_gc_start(c);
1631
Kent Overstreeta1f03582013-09-10 19:07:00 -07001632 do {
1633 ret = btree_root(gc_root, c, &op, &writes, &stats);
1634 closure_sync(&writes);
Kent Overstreet57943512013-04-25 13:58:35 -07001635
Kent Overstreeta1f03582013-09-10 19:07:00 -07001636 if (ret && ret != -EAGAIN)
1637 pr_warn("gc failed!");
1638 } while (ret);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001639
1640 available = bch_btree_gc_finish(c);
Kent Overstreet57943512013-04-25 13:58:35 -07001641 wake_up_allocators(c);
1642
Kent Overstreet169ef1c2013-03-28 12:50:55 -06001643 bch_time_stats_update(&c->btree_gc_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001644
1645 stats.key_bytes *= sizeof(uint64_t);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001646 stats.data <<= 9; /* sectors to bytes */
1647 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1648 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001649
Kent Overstreetc37511b2013-04-26 15:39:55 -07001650 trace_bcache_gc_end(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001651
Kent Overstreet72a44512013-10-24 17:19:26 -07001652 bch_moving_gc(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001653}
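
/*
 * The overall gc sequence, for orientation: btree_gc_start() snapshots
 * bucket generations and clears the marks, the btree walk above
 * re-marks every bucket that live keys point to, bch_btree_gc_finish()
 * turns those marks into reclaimable/unused buckets and wakes the
 * allocators, and bch_moving_gc() then copies data out of mostly-empty
 * buckets so they can be reused.
 */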
1654
Kent Overstreet72a44512013-10-24 17:19:26 -07001655static int bch_gc_thread(void *arg)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001656{
Kent Overstreet72a44512013-10-24 17:19:26 -07001657 struct cache_set *c = arg;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001658 struct cache *ca;
1659 unsigned i;
Kent Overstreet72a44512013-10-24 17:19:26 -07001660
1661 while (1) {
Kent Overstreeta1f03582013-09-10 19:07:00 -07001662again:
Kent Overstreet72a44512013-10-24 17:19:26 -07001663 bch_btree_gc(c);
1664
1665 set_current_state(TASK_INTERRUPTIBLE);
1666 if (kthread_should_stop())
1667 break;
1668
Kent Overstreeta1f03582013-09-10 19:07:00 -07001669 mutex_lock(&c->bucket_lock);
1670
1671 for_each_cache(ca, c, i)
1672 if (ca->invalidate_needs_gc) {
1673 mutex_unlock(&c->bucket_lock);
1674 set_current_state(TASK_RUNNING);
1675 goto again;
1676 }
1677
1678 mutex_unlock(&c->bucket_lock);
1679
Kent Overstreet72a44512013-10-24 17:19:26 -07001680 try_to_freeze();
1681 schedule();
1682 }
1683
1684 return 0;
1685}
1686
1687int bch_gc_thread_start(struct cache_set *c)
1688{
1689 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1690 if (IS_ERR(c->gc_thread))
1691 return PTR_ERR(c->gc_thread);
1692
1693 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1694 return 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001695}
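
/*
 * The thread sleeps in TASK_INTERRUPTIBLE until something kicks it;
 * wake_up_gc() in btree.h is roughly (a sketch):
 *
 *	static inline void wake_up_gc(struct cache_set *c)
 *	{
 *		if (c->gc_thread)
 *			wake_up_process(c->gc_thread);
 *	}
 */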
1696
1697/* Initial partial gc: run at registration, walking the btree to re-mark buckets and fix stale generations */
1698
1699static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
1700 unsigned long **seen)
1701{
Kent Overstreet50310162013-09-10 17:18:59 -07001702 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001703 unsigned i;
Kent Overstreet50310162013-09-10 17:18:59 -07001704 struct bkey *k, *p = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001705 struct bucket *g;
1706 struct btree_iter iter;
1707
1708 for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
1709 for (i = 0; i < KEY_PTRS(k); i++) {
1710 if (!ptr_available(b->c, k, i))
1711 continue;
1712
1713 g = PTR_BUCKET(b->c, k, i);
1714
1715 if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
1716 seen[PTR_DEV(k, i)]) ||
1717 !ptr_stale(b->c, k, i)) {
1718 g->gen = PTR_GEN(k, i);
1719
1720 if (b->level)
1721 g->prio = BTREE_PRIO;
1722 else if (g->prio == BTREE_PRIO)
1723 g->prio = INITIAL_PRIO;
1724 }
1725 }
1726
1727 btree_mark_key(b, k);
1728 }
1729
1730 if (b->level) {
Kent Overstreet50310162013-09-10 17:18:59 -07001731 bch_btree_iter_init(b, &iter, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001732
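/*
 * Pipelined walk: grab the next key and kick off a prefetch of its
 * child node before recursing into the previous child, so the
 * recursion overlaps with the prefetch io.
 */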
Kent Overstreet50310162013-09-10 17:18:59 -07001733 do {
1734 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
1735 if (k)
1736 btree_node_prefetch(b->c, k, b->level - 1);
1737
Kent Overstreetcafe5632013-03-23 16:11:31 -07001738 if (p)
Kent Overstreet50310162013-09-10 17:18:59 -07001739 ret = btree(check_recurse, p, b, op, seen);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001740
Kent Overstreet50310162013-09-10 17:18:59 -07001741 p = k;
1742 } while (p && !ret);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001743 }
1744
1745 return ret;
1746}
1747
Kent Overstreetc18536a2013-07-24 17:44:17 -07001748int bch_btree_check(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001749{
1750 int ret = -ENOMEM;
1751 unsigned i;
1752 unsigned long *seen[MAX_CACHES_PER_SET];
Kent Overstreetc18536a2013-07-24 17:44:17 -07001753 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001754
1755 memset(seen, 0, sizeof(seen));
Kent Overstreetb54d6932013-07-24 18:04:18 -07001756 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001757
1758 for (i = 0; c->cache[i]; i++) {
1759 size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
1760 seen[i] = kmalloc(n, GFP_KERNEL);
1761 if (!seen[i])
1762 goto err;
1763
1764 /* Disables the seen array until prio_read() uses it too */
1765 memset(seen[i], 0xFF, n);
1766 }
1767
Kent Overstreetc18536a2013-07-24 17:44:17 -07001768 ret = btree_root(check_recurse, c, &op, seen);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001769err:
1770 for (i = 0; i < MAX_CACHES_PER_SET; i++)
1771 kfree(seen[i]);
1772 return ret;
1773}
1774
1775/* Btree insertion */
1776
1777static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
1778{
1779 struct bset *i = b->sets[b->nsets].data;
1780
1781 memmove((uint64_t *) where + bkey_u64s(insert),
1782 where,
1783 (void *) end(i) - (void *) where);
1784
1785 i->keys += bkey_u64s(insert);
1786 bkey_copy(where, insert);
1787 bch_bset_fix_lookup_table(b, where);
1788}
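
/*
 * I.e. shift_keys() open-codes an insert into the currently open bset:
 * memmove() opens a bkey_u64s(insert) sized hole at @where, the new key
 * is copied in, and the auxiliary lookup table is repaired. For
 * example, inserting k2 between k1 and k3:
 *
 *	before:	k1 k3 .  .
 *	after:	k1 k2 k3 .
 */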
1789
Kent Overstreet1b207d82013-09-10 18:52:54 -07001790static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
Kent Overstreetcafe5632013-03-23 16:11:31 -07001791 struct btree_iter *iter,
Kent Overstreet1b207d82013-09-10 18:52:54 -07001792 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001793{
Kent Overstreet279afba2013-06-05 06:21:07 -07001794 void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001795 {
Kent Overstreet279afba2013-06-05 06:21:07 -07001796 if (KEY_DIRTY(k))
1797 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1798 offset, -sectors);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001799 }
1800
Kent Overstreet279afba2013-06-05 06:21:07 -07001801 uint64_t old_offset;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001802 unsigned old_size, sectors_found = 0;
1803
1804 while (1) {
1805 struct bkey *k = bch_btree_iter_next(iter);
1806 if (!k ||
1807 bkey_cmp(&START_KEY(k), insert) >= 0)
1808 break;
1809
1810 if (bkey_cmp(k, &START_KEY(insert)) <= 0)
1811 continue;
1812
Kent Overstreet279afba2013-06-05 06:21:07 -07001813 old_offset = KEY_START(k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001814 old_size = KEY_SIZE(k);
1815
1816 /*
1817 * We might overlap with 0 size extents; we can't skip these
1818 * because if they're in the set we're inserting to we have to
1819 * adjust them so they don't overlap with the key we're
Kent Overstreet1b207d82013-09-10 18:52:54 -07001820 * inserting. But we don't want to check them for replace
Kent Overstreetcafe5632013-03-23 16:11:31 -07001821 * operations.
1822 */
1823
Kent Overstreet1b207d82013-09-10 18:52:54 -07001824 if (replace_key && KEY_SIZE(k)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001825 /*
1826 * k might have been split since we inserted/found the
1827 * key we're replacing
1828 */
1829 unsigned i;
1830 uint64_t offset = KEY_START(k) -
Kent Overstreet1b207d82013-09-10 18:52:54 -07001831 KEY_START(replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001832
1833 /* But it must be a subset of the replace key */
Kent Overstreet1b207d82013-09-10 18:52:54 -07001834 if (KEY_START(k) < KEY_START(replace_key) ||
1835 KEY_OFFSET(k) > KEY_OFFSET(replace_key))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001836 goto check_failed;
1837
1838 /* We didn't find a key that we were supposed to */
1839 if (KEY_START(k) > KEY_START(insert) + sectors_found)
1840 goto check_failed;
1841
Kent Overstreetd24a6e12013-11-10 21:55:27 -08001842 if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
1843 KEY_DIRTY(k) != KEY_DIRTY(replace_key))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001844 goto check_failed;
1845
1846 /* skip past gen */
1847 offset <<= 8;
1848
Kent Overstreet1b207d82013-09-10 18:52:54 -07001849 BUG_ON(!KEY_PTRS(replace_key));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001850
Kent Overstreet1b207d82013-09-10 18:52:54 -07001851 for (i = 0; i < KEY_PTRS(replace_key); i++)
1852 if (k->ptr[i] != replace_key->ptr[i] + offset)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001853 goto check_failed;
1854
1855 sectors_found = KEY_OFFSET(k) - KEY_START(insert);
1856 }
1857
1858 if (bkey_cmp(insert, k) < 0 &&
1859 bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
1860 /*
1861 * We overlapped in the middle of an existing key: that
1862 * means we have to split the old key. But we have to do
1863 * slightly different things depending on whether the
1864 * old key has been written out yet.
1865 */
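/*
 * Pictorially (a rough sketch):
 *
 *	before:  |---------- k ----------|
 *	insert:       |-- insert --|
 *	after:   |-k-|              |top-|
 *
 * with the middle covered by the key being inserted.
 */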
1866
1867 struct bkey *top;
1868
Kent Overstreet279afba2013-06-05 06:21:07 -07001869 subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001870
1871 if (bkey_written(b, k)) {
1872 /*
1873 * We insert a new key to cover the top of the
1874 * old key, and the old key is modified in place
1875 * to represent the bottom split.
1876 *
1877 * It's completely arbitrary whether the new key
1878 * is the top or the bottom, but it has to match
1879 * up with what btree_sort_fixup() does - it
1880 * doesn't check for this kind of overlap, it
1881 * depends on us inserting a new key for the top
1882 * here.
1883 */
1884 top = bch_bset_search(b, &b->sets[b->nsets],
1885 insert);
1886 shift_keys(b, top, k);
1887 } else {
1888 BKEY_PADDED(key) temp;
1889 bkey_copy(&temp.key, k);
1890 shift_keys(b, k, &temp.key);
1891 top = bkey_next(k);
1892 }
1893
1894 bch_cut_front(insert, top);
1895 bch_cut_back(&START_KEY(insert), k);
1896 bch_bset_fix_invalidated_key(b, k);
1897 return false;
1898 }
1899
1900 if (bkey_cmp(insert, k) < 0) {
1901 bch_cut_front(insert, k);
1902 } else {
Kent Overstreet1fa84552013-11-10 21:55:27 -08001903 if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
1904 old_offset = KEY_START(insert);
1905
Kent Overstreetcafe5632013-03-23 16:11:31 -07001906 if (bkey_written(b, k) &&
1907 bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
1908 /*
1909 * Completely overwrote, so we don't have to
1910 * invalidate the binary search tree
1911 */
1912 bch_cut_front(k, k);
1913 } else {
1914 __bch_cut_back(&START_KEY(insert), k);
1915 bch_bset_fix_invalidated_key(b, k);
1916 }
1917 }
1918
Kent Overstreet279afba2013-06-05 06:21:07 -07001919 subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001920 }
1921
1922check_failed:
Kent Overstreet1b207d82013-09-10 18:52:54 -07001923 if (replace_key) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001924 if (!sectors_found) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001925 return true;
1926 } else if (sectors_found < KEY_SIZE(insert)) {
1927 SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
1928 (KEY_SIZE(insert) - sectors_found));
1929 SET_KEY_SIZE(insert, sectors_found);
1930 }
1931 }
1932
1933 return false;
1934}
1935
1936static bool btree_insert_key(struct btree *b, struct btree_op *op,
Kent Overstreet1b207d82013-09-10 18:52:54 -07001937 struct bkey *k, struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001938{
1939 struct bset *i = b->sets[b->nsets].data;
1940 struct bkey *m, *prev;
Kent Overstreet85b14922013-05-14 20:33:16 -07001941 unsigned status = BTREE_INSERT_STATUS_INSERT;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001942
1943 BUG_ON(bkey_cmp(k, &b->key) > 0);
1944 BUG_ON(b->level && !KEY_PTRS(k));
1945 BUG_ON(!b->level && !KEY_OFFSET(k));
1946
1947 if (!b->level) {
1948 struct btree_iter iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001949
1950 /*
1951 * bset_search() returns the first key that is strictly greater
1952 * than the search key - but for back merging, we want to find
Kent Overstreet0eacac22013-07-01 19:29:05 -07001953 * the previous key.
Kent Overstreetcafe5632013-03-23 16:11:31 -07001954 */
Kent Overstreetcafe5632013-03-23 16:11:31 -07001955 prev = NULL;
Kent Overstreet0eacac22013-07-01 19:29:05 -07001956 m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001957
Kent Overstreet1b207d82013-09-10 18:52:54 -07001958 if (fix_overlapping_extents(b, k, &iter, replace_key)) {
1959 op->insert_collision = true;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001960 return false;
Kent Overstreet1b207d82013-09-10 18:52:54 -07001961 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001962
Kent Overstreet1fa84552013-11-10 21:55:27 -08001963 if (KEY_DIRTY(k))
1964 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1965 KEY_START(k), KEY_SIZE(k));
1966
Kent Overstreetcafe5632013-03-23 16:11:31 -07001967 while (m != end(i) &&
1968 bkey_cmp(k, &START_KEY(m)) > 0)
1969 prev = m, m = bkey_next(m);
1970
1971 if (key_merging_disabled(b->c))
1972 goto insert;
1973
1974 /* prev is in the tree, if we merge we're done */
Kent Overstreet85b14922013-05-14 20:33:16 -07001975 status = BTREE_INSERT_STATUS_BACK_MERGE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001976 if (prev &&
1977 bch_bkey_try_merge(b, prev, k))
1978 goto merged;
1979
Kent Overstreet85b14922013-05-14 20:33:16 -07001980 status = BTREE_INSERT_STATUS_OVERWROTE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001981 if (m != end(i) &&
1982 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
1983 goto copy;
1984
Kent Overstreet85b14922013-05-14 20:33:16 -07001985 status = BTREE_INSERT_STATUS_FRONT_MERGE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001986 if (m != end(i) &&
1987 bch_bkey_try_merge(b, k, m))
1988 goto copy;
Kent Overstreet1b207d82013-09-10 18:52:54 -07001989 } else {
1990 BUG_ON(replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001991 m = bch_bset_search(b, &b->sets[b->nsets], k);
Kent Overstreet1b207d82013-09-10 18:52:54 -07001992 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001993
1994insert: shift_keys(b, m, k);
1995copy: bkey_copy(m, k);
1996merged:
Kent Overstreet1b207d82013-09-10 18:52:54 -07001997 bch_check_keys(b, "%u for %s", status,
1998 replace_key ? "replace" : "insert");
Kent Overstreetcafe5632013-03-23 16:11:31 -07001999
2000 if (b->level && !KEY_OFFSET(k))
Kent Overstreet57943512013-04-25 13:58:35 -07002001 btree_current_write(b)->prio_blocked++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002002
Kent Overstreet1b207d82013-09-10 18:52:54 -07002003 trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002004
2005 return true;
2006}
2007
Kent Overstreet26c949f2013-09-10 18:41:15 -07002008static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002009 struct keylist *insert_keys,
2010 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002011{
2012 bool ret = false;
Kent Overstreet280481d2013-10-24 16:36:03 -07002013 int oldsize = bch_count_data(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002014
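/*
 * Take keys off the front of insert_keys while they fit in this node;
 * a key straddling b->key is split and only the half this node covers
 * is inserted - the rest stays on the keylist for the caller to route
 * onward.
 */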
Kent Overstreet26c949f2013-09-10 18:41:15 -07002015 while (!bch_keylist_empty(insert_keys)) {
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002016 struct bset *i = write_block(b);
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002017 struct bkey *k = insert_keys->keys;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002018
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002019 if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
2020 > btree_blocks(b))
2021 break;
2022
2023 if (bkey_cmp(k, &b->key) <= 0) {
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002024 if (!b->level)
2025 bkey_put(b->c, k);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002026
Kent Overstreet1b207d82013-09-10 18:52:54 -07002027 ret |= btree_insert_key(b, op, k, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002028 bch_keylist_pop_front(insert_keys);
2029 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
Kent Overstreet26c949f2013-09-10 18:41:15 -07002030 BKEY_PADDED(key) temp;
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002031 bkey_copy(&temp.key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002032
2033 bch_cut_back(&b->key, &temp.key);
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002034 bch_cut_front(&b->key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002035
Kent Overstreet1b207d82013-09-10 18:52:54 -07002036 ret |= btree_insert_key(b, op, &temp.key, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002037 break;
2038 } else {
2039 break;
2040 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002041 }
2042
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002043 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2044
Kent Overstreetcafe5632013-03-23 16:11:31 -07002045 BUG_ON(bch_count_data(b) < oldsize);
2046 return ret;
2047}
2048
Kent Overstreet26c949f2013-09-10 18:41:15 -07002049static int btree_split(struct btree *b, struct btree_op *op,
2050 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002051 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002052{
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002053 bool split;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002054 struct btree *n1, *n2 = NULL, *n3 = NULL;
2055 uint64_t start_time = local_clock();
Kent Overstreetb54d6932013-07-24 18:04:18 -07002056 struct closure cl;
Kent Overstreet17e21a92013-07-26 12:32:38 -07002057 struct keylist parent_keys;
Kent Overstreetb54d6932013-07-24 18:04:18 -07002058
2059 closure_init_stack(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002060 bch_keylist_init(&parent_keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002061
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002062 n1 = btree_node_alloc_replacement(b, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002063 if (IS_ERR(n1))
2064 goto err;
2065
2066 split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; /* i.e. more than 80% full */
2067
Kent Overstreetcafe5632013-03-23 16:11:31 -07002068 if (split) {
2069 unsigned keys = 0;
2070
Kent Overstreetc37511b2013-04-26 15:39:55 -07002071 trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
2072
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002073 n2 = bch_btree_node_alloc(b->c, b->level, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002074 if (IS_ERR(n2))
2075 goto err_free1;
2076
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002077 if (!b->parent) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002078 n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002079 if (IS_ERR(n3))
2080 goto err_free2;
2081 }
2082
Kent Overstreet1b207d82013-09-10 18:52:54 -07002083 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002084
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002085 /*
2086 * Has to be a linear search because we don't have an auxiliary
Kent Overstreetcafe5632013-03-23 16:11:31 -07002087 * search tree yet
2088 */
2089
2090 while (keys < (n1->sets[0].data->keys * 3) / 5)
2091 keys += bkey_u64s(node(n1->sets[0].data, keys));
2092
2093 bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
2094 keys += bkey_u64s(node(n1->sets[0].data, keys));
2095
2096 n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
2097 n1->sets[0].data->keys = keys;
2098
2099 memcpy(n2->sets[0].data->start,
2100 end(n1->sets[0].data),
2101 n2->sets[0].data->keys * sizeof(uint64_t));
2102
2103 bkey_copy_key(&n2->key, &b->key);
2104
Kent Overstreet17e21a92013-07-26 12:32:38 -07002105 bch_keylist_add(&parent_keys, &n2->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002106 bch_btree_node_write(n2, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002107 rw_unlock(true, n2);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002108 } else {
2109 trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
2110
Kent Overstreet1b207d82013-09-10 18:52:54 -07002111 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002112 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002113
Kent Overstreet17e21a92013-07-26 12:32:38 -07002114 bch_keylist_add(&parent_keys, &n1->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002115 bch_btree_node_write(n1, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002116
2117 if (n3) {
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002118 /* Depth increases, make a new root */
Kent Overstreetcafe5632013-03-23 16:11:31 -07002119 bkey_copy_key(&n3->key, &MAX_KEY);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002120 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002121 bch_btree_node_write(n3, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002122
Kent Overstreetb54d6932013-07-24 18:04:18 -07002123 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002124 bch_btree_set_root(n3);
2125 rw_unlock(true, n3);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002126
2127 btree_node_free(b);
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002128 } else if (!b->parent) {
2129 /* Root filled up but didn't need to be split */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002130 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002131 bch_btree_set_root(n1);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002132
2133 btree_node_free(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002134 } else {
Kent Overstreet17e21a92013-07-26 12:32:38 -07002135 /* Split a non root node */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002136 closure_sync(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002137 make_btree_freeing_key(b, parent_keys.top);
2138 bch_keylist_push(&parent_keys);
2139
2140 btree_node_free(b);
2141
2142 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2143 BUG_ON(!bch_keylist_empty(&parent_keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002144 }
2145
2146 rw_unlock(true, n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002147
Kent Overstreet169ef1c2013-03-28 12:50:55 -06002148 bch_time_stats_update(&b->c->btree_split_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002149
2150 return 0;
2151err_free2:
Kent Overstreete8e1d462013-07-24 17:27:07 -07002152 btree_node_free(n2);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002153 rw_unlock(true, n2);
2154err_free1:
Kent Overstreete8e1d462013-07-24 17:27:07 -07002155 btree_node_free(n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002156 rw_unlock(true, n1);
2157err:
2158 if (n3 == ERR_PTR(-EAGAIN) ||
2159 n2 == ERR_PTR(-EAGAIN) ||
2160 n1 == ERR_PTR(-EAGAIN))
2161 return -EAGAIN;
2162
2163 pr_warn("couldn't split");
2164 return -ENOMEM;
2165}
2166
Kent Overstreet26c949f2013-09-10 18:41:15 -07002167static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
Kent Overstreetc18536a2013-07-24 17:44:17 -07002168 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002169 atomic_t *journal_ref,
2170 struct bkey *replace_key)
Kent Overstreet26c949f2013-09-10 18:41:15 -07002171{
Kent Overstreet17e21a92013-07-26 12:32:38 -07002172 BUG_ON(b->level && replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002173
Kent Overstreet17e21a92013-07-26 12:32:38 -07002174 if (should_split(b)) {
2175 if (current->bio_list) {
2176 op->lock = b->c->root->level + 1;
2177 return -EAGAIN;
2178 } else if (op->lock <= b->c->root->level) {
2179 op->lock = b->c->root->level + 1;
2180 return -EINTR;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002181 } else {
Kent Overstreet17e21a92013-07-26 12:32:38 -07002182 /* Invalidated all iterators */
2183 return btree_split(b, op, insert_keys, replace_key) ?:
2184 -EINTR;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002185 }
Kent Overstreet17e21a92013-07-26 12:32:38 -07002186 } else {
2187 BUG_ON(write_block(b) != b->sets[b->nsets].data);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002188
Kent Overstreet17e21a92013-07-26 12:32:38 -07002189 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2190 if (!b->level)
2191 bch_btree_leaf_dirty(b, journal_ref);
2192 else
2193 bch_btree_node_write_sync(b);
2194 }
2195
2196 return 0;
2197 }
Kent Overstreet26c949f2013-09-10 18:41:15 -07002198}
2199
Kent Overstreete7c590e2013-09-10 18:39:16 -07002200int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2201 struct bkey *check_key)
2202{
2203 int ret = -EINTR;
2204 uint64_t btree_ptr = b->key.ptr[0];
2205 unsigned long seq = b->seq;
2206 struct keylist insert;
2207 bool upgrade = op->lock == -1;
2208
2209 bch_keylist_init(&insert);
2210
2211 if (upgrade) {
2212 rw_unlock(false, b);
2213 rw_lock(true, b, b->level);
2214
2215 if (b->key.ptr[0] != btree_ptr ||
2216 b->seq != seq + 1)
2217 goto out;
2218 }
2219
2220 SET_KEY_PTRS(check_key, 1);
2221 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2222
2223 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2224
2225 bch_keylist_add(&insert, check_key);
2226
Kent Overstreet1b207d82013-09-10 18:52:54 -07002227 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
Kent Overstreete7c590e2013-09-10 18:39:16 -07002228
2229 BUG_ON(!ret && !bch_keylist_empty(&insert));
2230out:
2231 if (upgrade)
2232 downgrade_write(&b->lock);
2233 return ret;
2234}
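
/*
 * bch_btree_insert_check_key() backs the cache miss path: it inserts a
 * zero-size key with a unique random pointer (PTR_CHECK_DEV) so that
 * the later insert of the actual data can pass it as replace_key and
 * fail gracefully if something else wrote to that range in the
 * meantime; the caller side lives in request.c (roughly, it is invoked
 * from the cache lookup on a miss).
 */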
2235
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002236struct btree_insert_op {
2237 struct btree_op op;
2238 struct keylist *keys;
2239 atomic_t *journal_ref;
2240 struct bkey *replace_key;
2241};
2242
Wei Yongjun08239ca2013-11-28 10:31:35 +08002243static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002244{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002245 struct btree_insert_op *op = container_of(b_op,
2246 struct btree_insert_op, op);
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002247
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002248 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2249 op->journal_ref, op->replace_key);
2250 if (ret && !bch_keylist_empty(op->keys))
2251 return ret;
2252 else
2253 return MAP_DONE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002254}
2255
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002256int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2257 atomic_t *journal_ref, struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002258{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002259 struct btree_insert_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002260 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002261
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002262 BUG_ON(current->bio_list);
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002263 BUG_ON(bch_keylist_empty(keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002264
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002265 bch_btree_op_init(&op.op, 0);
2266 op.keys = keys;
2267 op.journal_ref = journal_ref;
2268 op.replace_key = replace_key;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002269
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002270 while (!ret && !bch_keylist_empty(keys)) {
2271 op.op.lock = 0;
2272 ret = bch_btree_map_leaf_nodes(&op.op, c,
2273 &START_KEY(keys->keys),
2274 btree_insert_fn);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002275 }
2276
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002277 if (ret) {
2278 struct bkey *k;
2279
2280 pr_err("error %i", ret);
2281
2282 while ((k = bch_keylist_pop(keys)))
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002283 bkey_put(c, k);
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002284 } else if (op.op.insert_collision)
2285 ret = -ESRCH;
Kent Overstreet6054c6d2013-07-24 18:06:22 -07002286
Kent Overstreetcafe5632013-03-23 16:11:31 -07002287 return ret;
2288}
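
/*
 * Typical usage, sketched: a caller fills a keylist and then does,
 * roughly,
 *
 *	ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 *
 * passing a non-NULL replace_key only when the insert must fail if the
 * key being replaced changed underneath us - e.g. writeback, which
 * must not clobber data rewritten while it was in flight.
 */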
2289
2290void bch_btree_set_root(struct btree *b)
2291{
2292 unsigned i;
Kent Overstreete49c7c32013-06-26 17:25:38 -07002293 struct closure cl;
2294
2295 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002296
Kent Overstreetc37511b2013-04-26 15:39:55 -07002297 trace_bcache_btree_set_root(b);
2298
Kent Overstreetcafe5632013-03-23 16:11:31 -07002299 BUG_ON(!b->written);
2300
2301 for (i = 0; i < KEY_PTRS(&b->key); i++)
2302 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2303
2304 mutex_lock(&b->c->bucket_lock);
2305 list_del_init(&b->list);
2306 mutex_unlock(&b->c->bucket_lock);
2307
2308 b->c->root = b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002309
Kent Overstreete49c7c32013-06-26 17:25:38 -07002310 bch_journal_meta(b->c, &cl);
2311 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002312}
2313
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002314/* Map across nodes or keys */
2315
2316static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2317 struct bkey *from,
2318 btree_map_nodes_fn *fn, int flags)
2319{
2320 int ret = MAP_CONTINUE;
2321
2322 if (b->level) {
2323 struct bkey *k;
2324 struct btree_iter iter;
2325
2326 bch_btree_iter_init(b, &iter, from);
2327
2328 while ((k = bch_btree_iter_next_filter(&iter, b,
2329 bch_ptr_bad))) {
2330 ret = btree(map_nodes_recurse, k, b,
2331 op, from, fn, flags);
2332 from = NULL;
2333
2334 if (ret != MAP_CONTINUE)
2335 return ret;
2336 }
2337 }
2338
2339 if (!b->level || flags == MAP_ALL_NODES)
2340 ret = fn(op, b);
2341
2342 return ret;
2343}
2344
2345int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2346 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2347{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002348 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002349}
2350
2351static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2352 struct bkey *from, btree_map_keys_fn *fn,
2353 int flags)
2354{
2355 int ret = MAP_CONTINUE;
2356 struct bkey *k;
2357 struct btree_iter iter;
2358
2359 bch_btree_iter_init(b, &iter, from);
2360
2361 while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
2362 ret = !b->level
2363 ? fn(op, b, k)
2364 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2365 from = NULL;
2366
2367 if (ret != MAP_CONTINUE)
2368 return ret;
2369 }
2370
2371 if (!b->level && (flags & MAP_END_KEY))
2372 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2373 KEY_OFFSET(&b->key), 0));
2374
2375 return ret;
2376}
2377
2378int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2379 struct bkey *from, btree_map_keys_fn *fn, int flags)
2380{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002381 return btree_root(map_keys_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002382}
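
/*
 * A minimal map function looks like this sketch (the names here are
 * illustrative, not from this file):
 *
 *	static int count_dirty_fn(struct btree_op *op, struct btree *b,
 *				  struct bkey *k)
 *	{
 *		if (KEY_DIRTY(k))
 *			...;
 *		return MAP_CONTINUE;
 *	}
 *
 * driven by bch_btree_map_keys(&op, c, &ZERO_KEY, count_dirty_fn, 0).
 * Returning MAP_DONE (or an error) from the callback stops the walk;
 * refill_keybuf_fn() below is a real example.
 */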
2383
Kent Overstreetcafe5632013-03-23 16:11:31 -07002384/* Keybuf code */
2385
2386static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2387{
2388 /* Overlapping keys compare equal */
2389 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2390 return -1;
2391 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2392 return 1;
2393 return 0;
2394}
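
/*
 * Because overlapping keys compare equal, RB_INSERT() with keybuf_cmp()
 * fails when the buffer already holds a key overlapping the new one -
 * which is how refill_keybuf_fn() below avoids queueing the same range
 * twice.
 */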
2395
2396static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2397 struct keybuf_key *r)
2398{
2399 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2400}
2401
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002402struct refill {
2403 struct btree_op op;
Kent Overstreet48a915a2013-10-31 15:43:22 -07002404 unsigned nr_found;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002405 struct keybuf *buf;
2406 struct bkey *end;
2407 keybuf_pred_fn *pred;
2408};
2409
2410static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2411 struct bkey *k)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002412{
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002413 struct refill *refill = container_of(op, struct refill, op);
2414 struct keybuf *buf = refill->buf;
2415 int ret = MAP_CONTINUE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002416
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002417 if (bkey_cmp(k, refill->end) >= 0) {
2418 ret = MAP_DONE;
2419 goto out;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002420 }
2421
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002422 if (!KEY_SIZE(k)) /* end key */
2423 goto out;
2424
2425 if (refill->pred(buf, k)) {
2426 struct keybuf_key *w;
2427
2428 spin_lock(&buf->lock);
2429
2430 w = array_alloc(&buf->freelist);
2431 if (!w) {
2432 spin_unlock(&buf->lock);
2433 return MAP_DONE;
2434 }
2435
2436 w->private = NULL;
2437 bkey_copy(&w->key, k);
2438
2439 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2440 array_free(&buf->freelist, w);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002441 else
2442 refill->nr_found++;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002443
2444 if (array_freelist_empty(&buf->freelist))
2445 ret = MAP_DONE;
2446
2447 spin_unlock(&buf->lock);
2448 }
2449out:
2450 buf->last_scanned = *k;
2451 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002452}
2453
2454void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
Kent Overstreet72c27062013-06-05 06:24:39 -07002455 struct bkey *end, keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002456{
2457 struct bkey start = buf->last_scanned;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002458 struct refill refill;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002459
2460 cond_resched();
2461
Kent Overstreetb54d6932013-07-24 18:04:18 -07002462 bch_btree_op_init(&refill.op, -1);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002463 refill.nr_found = 0;
2464 refill.buf = buf;
2465 refill.end = end;
2466 refill.pred = pred;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002467
2468 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2469 refill_keybuf_fn, MAP_END_KEY);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002470
Kent Overstreet48a915a2013-10-31 15:43:22 -07002471 trace_bcache_keyscan(refill.nr_found,
2472 KEY_INODE(&start), KEY_OFFSET(&start),
2473 KEY_INODE(&buf->last_scanned),
2474 KEY_OFFSET(&buf->last_scanned));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002475
2476 spin_lock(&buf->lock);
2477
2478 if (!RB_EMPTY_ROOT(&buf->keys)) {
2479 struct keybuf_key *w;
2480 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2481 buf->start = START_KEY(&w->key);
2482
2483 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2484 buf->end = w->key;
2485 } else {
2486 buf->start = MAX_KEY;
2487 buf->end = MAX_KEY;
2488 }
2489
2490 spin_unlock(&buf->lock);
2491}
2492
2493static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2494{
2495 rb_erase(&w->node, &buf->keys);
2496 array_free(&buf->freelist, w);
2497}
2498
2499void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2500{
2501 spin_lock(&buf->lock);
2502 __bch_keybuf_del(buf, w);
2503 spin_unlock(&buf->lock);
2504}
2505
2506bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2507 struct bkey *end)
2508{
2509 bool ret = false;
2510 struct keybuf_key *p, *w, s;
2511 s.key = *start;
2512
2513 if (bkey_cmp(end, &buf->start) <= 0 ||
2514 bkey_cmp(start, &buf->end) >= 0)
2515 return false;
2516
2517 spin_lock(&buf->lock);
2518 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2519
2520 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2521 p = w;
2522 w = RB_NEXT(w, node);
2523
2524 if (p->private)
2525 ret = true;
2526 else
2527 __bch_keybuf_del(buf, p);
2528 }
2529
2530 spin_unlock(&buf->lock);
2531 return ret;
2532}
2533
2534struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2535{
2536 struct keybuf_key *w;
2537 spin_lock(&buf->lock);
2538
2539 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2540
2541 while (w && w->private)
2542 w = RB_NEXT(w, node);
2543
2544 if (w)
2545 w->private = ERR_PTR(-EINTR);
2546
2547 spin_unlock(&buf->lock);
2548 return w;
2549}
2550
2551struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002552 struct keybuf *buf,
2553 struct bkey *end,
2554 keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002555{
2556 struct keybuf_key *ret;
2557
2558 while (1) {
2559 ret = bch_keybuf_next(buf);
2560 if (ret)
2561 break;
2562
2563 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2564 pr_debug("scan finished");
2565 break;
2566 }
2567
Kent Overstreet72c27062013-06-05 06:24:39 -07002568 bch_refill_keybuf(c, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002569 }
2570
2571 return ret;
2572}
2573
Kent Overstreet72c27062013-06-05 06:24:39 -07002574void bch_keybuf_init(struct keybuf *buf)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002575{
Kent Overstreetcafe5632013-03-23 16:11:31 -07002576 buf->last_scanned = MAX_KEY;
2577 buf->keys = RB_ROOT;
2578
2579 spin_lock_init(&buf->lock);
2580 array_allocator_init(&buf->freelist);
2581}
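
/*
 * Keybuf usage, sketched (this mirrors how the writeback and moving gc
 * code drive it):
 *
 *	bch_keybuf_init(&buf);
 *
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &end, pred))) {
 *		... do io against w->key ...
 *		bch_keybuf_del(&buf, w);
 *	}
 *
 * pred() decides which keys are interesting; w->private lets the
 * consumer mark keys it is still working on.
 */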
2582
2583void bch_btree_exit(void)
2584{
2585 if (btree_io_wq)
2586 destroy_workqueue(btree_io_wq);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002587}
2588
2589int __init bch_btree_init(void)
2590{
Kent Overstreet72a44512013-10-24 17:19:26 -07002591 btree_io_wq = create_singlethread_workqueue("bch_btree_io");
2592 if (!btree_io_wq)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002593 return -ENOMEM;
2594
2595 return 0;
2596}