/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we re-sort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

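/*
 * Illustrative note (not part of the driver): the 8 bit counter above
 * makes invalidation O(1). Each pointer embeds the generation it was
 * created with, and it goes stale once the bucket's generation moves
 * past it - conceptually a wrapping comparison along the lines of:
 *
 *	stale = (int8_t) (bucket_gen - ptr_gen) > 0;
 *
 * Because the counter wraps at 256, garbage collection must rewrite or
 * drop old pointers before a bucket's generation can lap them.
 */
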
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Make sure all allocations get charged to the root cgroup
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Also lookup by cgroup in get_open_bucket()
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

static const char * const op_types[] = {
	"insert", "replace"
};

static const char *op_type(struct btree_op *op)
{
	return op_types[op->type];
}

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

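/*
 * PTR_HASH() identifies a btree node by its on-disk position plus the
 * pointer's generation. It is used by mca_hash()/mca_find() below to
 * look nodes up in the in-memory cache, and re-checked after locking
 * in bch_btree_node_get() to catch a node that was freed and reused
 * while we slept on the lock:
 *
 *	rw_lock(write, b, level);
 *	if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
 *		rw_unlock(write, b);
 *		goto retry;
 *	}
 */
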
static struct workqueue_struct *btree_io_wq;

void bch_btree_op_init_stack(struct btree_op *op)
{
	memset(op, 0, sizeof(struct btree_op));
	closure_init_stack(&op->cl);
	op->lock = -1;
}

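/*
 * Typical usage (sketch; error handling omitted): callers embed a
 * struct btree_op on the stack, initialize it, and hand it to the
 * btree_root() macro below:
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init_stack(&op);
 *	op.lock = SHRT_MAX;	// take write locks at every level
 *	ret = btree_root(gc_root, c, &op, &writes, &stats);
 *
 * op->lock == -1 (the default) means read locks all the way down.
 */
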
static inline bool should_split(struct btree *b)
{
	struct bset *i = write_block(b);
	return b->written >= btree_blocks(b) ||
		(b->written + __set_blocks(i, i->keys + 15, b->c)
		 > btree_blocks(b));
}

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
	if (!IS_ERR(_child)) {						\
		_child->parent = (b);					\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the root node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_b->parent = NULL;				\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -ENOSPC) {					\
			wait_event((c)->try_wait,			\
				   !(c)->try_harder);			\
			_r = -EINTR;					\
		}							\
	} while (_r == -EINTR);						\
									\
	_r;								\
})

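/*
 * Sketch of how these macros are used (modeled on
 * bch_btree_check_recurse() below; "example" is a hypothetical name):
 *
 *	static int bch_btree_example(struct btree *b, struct btree_op *op)
 *	{
 *		// ... per-node work ...
 *
 *		if (b->level) {
 *			struct bkey *k = bch_next_recurse_key(b, &ZERO_KEY);
 *
 *			while (k) {
 *				int ret = btree(example, k, b, op);
 *				if (ret)
 *					return ret;
 *				k = bch_next_recurse_key(b, k);
 *			}
 *		}
 *		return 0;
 *	}
 *
 * ...invoked from the top as btree_root(example, c, &op). Returning
 * -EINTR restarts from the root with the (possibly updated) op->lock.
 */
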
/* Btree key manipulation */

void __bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

static void bkey_put(struct cache_set *c, struct bkey *k, int level)
{
	if ((level && KEY_OFFSET(k)) || !level)
		__bkey_put(c, k);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = end(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

static void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = b->sets[0].data;
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(b->c))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->sets[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, end(i));

		b->written += set_blocks(i, b->c);
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     index(i, b) < btree_blocks(b);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->sets[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(b, iter);

	i = b->sets[0].data;
	err = "short btree key";
	if (b->sets[0].size &&
	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    index(i, b), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw	= REQ_META|READ_SYNC;
	bio->bi_size	= KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;

	bch_bio_map(bio, b->sets[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);

	spin_lock(&b->c->btree_read_time_lock);
	bch_time_stats_update(&b->c->btree_read_time, start_time);
	spin_unlock(&b->c->btree_read_time_lock);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));

	closure_return(cl);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct bio_vec *bv;
	int n;

	__bio_for_each_segment(bv, b->bio, n, 0)
		__free_page(bv->bv_page);

	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io.cl);

	if (error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io.cl;
	struct bset *i = b->sets[b->nsets].data;
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= &b->io.cl;
	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c);
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		__btree_node_write_done(cl);
	}
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = b->sets[b->nsets].data;

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(b->sets->data->seq != i->seq);
	bch_check_key_order(b, i);

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	closure_lock(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	b->written += set_blocks(i, b->c);
	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	bch_btree_sort_lazy(b);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	rw_lock(true, b, b->level);

	if (btree_node_dirty(b))
		bch_btree_node_write(b, NULL);
	rw_unlock(true, b);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = b->sets[b->nsets].data;
	struct btree_write *w = btree_current_write(b);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

static void mca_reinit(struct btree *b)
{
	unsigned i;

	b->flags	= 0;
	b->written	= 0;
	b->nsets	= 0;

	for (i = 0; i < MAX_BSETS; i++)
		b->sets[i].size = 0;
	/*
	 * Second loop starts at 1 because b->sets[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->sets[i].data = NULL;
}

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))

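/*
 * Worked numbers for the reserve: with a root at level 2, mca_reserve()
 * is 2 * 8 + 16 = 32 in-memory nodes - roughly enough headroom for a
 * full split cascade plus slack - and mca_can_free() only counts cached
 * nodes beyond that reserve as reclaimable by the shrinker.
 */
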
static void mca_data_free(struct btree *b)
{
	struct bset_tree *t = b->sets;
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
	list_move(&b->list, &b->c->btree_cache_freed);
	b->c->bucket_cache_used--;
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	struct bset_tree *t = b->sets;
	BUG_ON(t->data);

	b->page_order = max_t(unsigned,
			      ilog2(b->c->btree_pages),
			      btree_order(k));

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	list_move(&b->list, &b->c->btree_cache);
	b->c->bucket_cache_used++;
	return;
err:
	mca_data_free(b);
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	closure_init_unlocked(&b->io);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

	if (b->page_order < min_order ||
	    (!flush &&
	     (btree_node_dirty(b) ||
	      atomic_read(&b->io.cl.remaining) != -1))) {
		rw_unlock(true, b);
		return -ENOMEM;
	}

	if (btree_node_dirty(b)) {
		bch_btree_node_write(b, &cl);
		closure_sync(&cl);
	}

	/* wait for any in flight btree write */
	closure_wait_event_sync(&b->io.wait, &cl,
				atomic_read(&b->io.cl.remaining) == -1);

	return 0;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->try_harder)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	/*
	 * Can happen right when we first start up, before we've read in any
	 * btree nodes
	 */
	if (list_empty(&c->btree_cache))
		goto out;

	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->try_harder)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->sets[0].data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (!c->try_harder) {
		c->try_harder = current;
		c->try_harder_start = local_clock();
	} else if (c->try_harder != current)
		return ERR_PTR(-ENOSPC);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() takes. This means every time we unlock the root of the
 * btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->try_harder == current) {
		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
		wake_up(&c->try_wait);
	}
}

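/*
 * The "open coded mutex" above is just c->try_harder: mca_cannibalize()
 * claims it by storing current, btree_root() releases it via
 * bch_cannibalize_unlock() after every traversal of the root, and a
 * competing thread sees -ENOSPC and sleeps on c->try_wait until the
 * holder clears it.
 */
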
static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->sets->data)
		goto err;
out:
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->level	= level;
	b->parent	= (void *) ~0UL;

	mca_reinit(b);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
 * if that closure is in non-blocking mode, will return -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
				 int level, bool write)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->accessed = 1;

	for (; i <= b->nsets && b->sets[i].size; i++) {
		prefetch(b->sets[i].tree);
		prefetch(b->sets[i].data);
	}

	for (; i <= b->nsets; i++)
		prefetch(b->sets[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	b = mca_alloc(c, k, level);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	unsigned i;

	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));

		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
			    PTR_BUCKET(b->c, &b->key, i));
	}

	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *bch_btree_node_alloc(struct cache_set *c, int level)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
		goto err;

	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		__bkey_put(c, &k.key);
		goto retry;
	}

	b->accessed = 1;
	bch_bset_init_next(b);

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
	__bkey_put(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(b);
	return b;
}

static struct btree *btree_node_alloc_replacement(struct btree *b)
{
	struct btree *n = bch_btree_node_alloc(b->c, b->level);
	if (!IS_ERR_OR_NULL(n))
		bch_btree_sort_into(b, n);

	return n;
}

/* Garbage collection */

uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
			g->gc_gen = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     (1 << 14) - 1));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

static int btree_gc_mark_node(struct btree *b, unsigned *keys,
			      struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned last_dev = -1;
	struct bcache_device *d = NULL;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		if (last_dev != KEY_INODE(k)) {
			last_dev = KEY_INODE(k);

			d = KEY_INODE(k) < b->c->nr_uuids
				? b->c->devices[last_dev]
				: NULL;
		}

		stale = max(stale, btree_mark_key(b, k));

		if (bch_ptr_bad(b, k))
			continue;

		*keys += bkey_u64s(k);

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;

		gc->data += KEY_SIZE(k);
		if (KEY_DIRTY(k))
			gc->dirty += KEY_SIZE(k);
	}

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(b, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	return stale;
}

static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k)
{
	/*
	 * We block priorities from being written for the duration of garbage
	 * collection, so we can't sleep in btree_alloc() ->
	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
	 * our closure.
	 */
	struct btree *n = btree_node_alloc_replacement(b);

	if (!IS_ERR_OR_NULL(n)) {
		swap(b, n);
		__bkey_put(b->c, &b->key);

		memcpy(k->ptr, b->key.ptr,
		       sizeof(uint64_t) * KEY_PTRS(&b->key));

		btree_node_free(n);
		up_write(&n->lock);
	}

	return b;
}

/*
 * Leaving this at 2 until we've got incremental garbage collection done; it
 * could be higher (and has been tested with 4) except that garbage collection
 * could take much longer, adversely affecting latency.
 */
#define GC_MERGE_NODES	2U

struct gc_merge_info {
	struct btree	*b;
	struct bkey	*k;
	unsigned	keys;
};

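/*
 * Worked numbers for the coalescing check below (GC_MERGE_NODES == 2):
 * btree_gc_coalesce() frees one node by merging two, and only proceeds
 * when the two nodes' combined keys fit in 2/3 of a default-sized node
 * (blocks * (nodes - 1)), so the merged node is left with slack and
 * won't immediately split again.
 */
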
static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
			      struct gc_merge_info *r)
{
	unsigned nodes = 0, keys = 0, blocks;
	int i;

	while (nodes < GC_MERGE_NODES && r[nodes].b)
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
		return;

	for (i = nodes - 1; i >= 0; --i) {
		if (r[i].b->written)
			r[i].b = btree_gc_alloc(r[i].b, r[i].k);

		if (r[i].b->written)
			return;
	}

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = r[i].b->sets->data;
		struct bset *n2 = r[i - 1].b->sets->data;
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i == 1) {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + r->keys,
					 b->c) > btree_blocks(r[i].b))
				return;

			keys = n2->keys;
			last = &r->b->key;
		} else
			for (k = n2->start;
			     k < end(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k), b->c) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}

		BUG_ON(__set_blocks(n1, n1->keys + keys,
				    b->c) > btree_blocks(r[i].b));

		if (last) {
			bkey_copy_key(&r[i].b->key, last);
			bkey_copy_key(r[i].k, last);
		}

		memcpy(end(n1),
		       n2->start,
		       (void *) node(n2, keys) - (void *) n2->start);

		n1->keys += keys;

		memmove(n2->start,
			node(n2, keys),
			(void *) end(n2) - (void *) node(n2, keys));

		n2->keys -= keys;

		r[i].keys	= n1->keys;
		r[i - 1].keys	= n2->keys;
	}

	btree_node_free(r->b);
	up_write(&r->b->lock);

	trace_bcache_btree_gc_coalesce(nodes);

	gc->nodes--;
	nodes--;

	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
	memset(&r[nodes], 0, sizeof(struct gc_merge_info));
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	void write(struct btree *r)
	{
		if (!r->written)
			bch_btree_node_write(r, &op->cl);
		else if (btree_node_dirty(r))
			bch_btree_node_write(r, writes);

		up_write(&r->lock);
	}

	int ret = 0, stale;
	unsigned i;
	struct gc_merge_info r[GC_MERGE_NODES];

	memset(r, 0, sizeof(r));

	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, true);

		if (IS_ERR(r->b)) {
			ret = PTR_ERR(r->b);
			break;
		}

		r->keys = 0;
		stale = btree_gc_mark_node(r->b, &r->keys, gc);

		if (!b->written &&
		    (r->b->level || stale > 10 ||
		     b->c->gc_always_rewrite))
			r->b = btree_gc_alloc(r->b, r->k);

		if (r->b->level)
			ret = btree_gc_recurse(r->b, op, writes, gc);

		if (ret) {
			write(r->b);
			break;
		}

		bkey_copy_key(&b->c->gc_done, r->k);

		if (!b->written)
			btree_gc_coalesce(b, gc, r);

		if (r[GC_MERGE_NODES - 1].b)
			write(r[GC_MERGE_NODES - 1].b);

		memmove(&r[1], &r[0],
			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));

		/* When we've got incremental GC working, we'll want to do
		 * if (should_resched())
		 *	return -EAGAIN;
		 */
		cond_resched();
#if 0
		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
#endif
	}

	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
		write(r[i].b);

	/* Might have freed some children, must remove their keys */
	if (!b->written)
		bch_btree_sort(b);

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	unsigned keys = 0;
	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);

	if (b->level || stale > 10)
		n = btree_node_alloc_replacement(b);

	if (!IS_ERR_OR_NULL(n))
		swap(b, n);

	if (b->level)
		ret = btree_gc_recurse(b, op, writes, gc);

	if (!b->written || btree_node_dirty(b)) {
		bch_btree_node_write(b, n ? &op->cl : NULL);
	}

	if (!IS_ERR_OR_NULL(n)) {
		closure_sync(&op->cl);
		bch_btree_set_root(b);
		btree_node_free(n);
		rw_unlock(true, b);
	}

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc	= b->gc_gen;
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}

static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	btree_gc_start(c);

	atomic_inc(&c->prio_blocked);

	ret = btree_root(gc_root, c, &op, &writes, &stats);
	closure_sync(&op.cl);
	closure_sync(&writes);

	if (ret) {
		pr_warn("gc failed!");
		return;
	}

	/* Possibly wait for new UUIDs or whatever to hit disk */
	bch_journal_meta(c, &op.cl);
	closure_sync(&op.cl);

	available = bch_btree_gc_finish(c);

	atomic_dec(&c->prio_blocked);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.dirty	<<= 9;
	stats.data	<<= 9;
	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		bch_btree_gc(c);

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		try_to_freeze();
		schedule();
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
	return 0;
}

1614/* Initial partial gc */
1615
1616static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
1617 unsigned long **seen)
1618{
1619 int ret;
1620 unsigned i;
1621 struct bkey *k;
1622 struct bucket *g;
1623 struct btree_iter iter;
1624
1625 for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
1626 for (i = 0; i < KEY_PTRS(k); i++) {
1627 if (!ptr_available(b->c, k, i))
1628 continue;
1629
1630 g = PTR_BUCKET(b->c, k, i);
1631
1632 if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
1633 seen[PTR_DEV(k, i)]) ||
1634 !ptr_stale(b->c, k, i)) {
1635 g->gen = PTR_GEN(k, i);
1636
1637 if (b->level)
1638 g->prio = BTREE_PRIO;
1639 else if (g->prio == BTREE_PRIO)
1640 g->prio = INITIAL_PRIO;
1641 }
1642 }
1643
1644 btree_mark_key(b, k);
1645 }
1646
1647 if (b->level) {
1648 k = bch_next_recurse_key(b, &ZERO_KEY);
1649
1650 while (k) {
1651 struct bkey *p = bch_next_recurse_key(b, k);
1652 if (p)
1653 btree_node_prefetch(b->c, p, b->level - 1);
1654
1655 ret = btree(check_recurse, k, b, op, seen);
1656 if (ret)
1657 return ret;
1658
1659 k = p;
1660 }
1661 }
1662
1663 return 0;
1664}

int bch_btree_check(struct cache_set *c)
{
	int ret = -ENOMEM;
	unsigned i;
	unsigned long *seen[MAX_CACHES_PER_SET];
	struct btree_op op;

	memset(seen, 0, sizeof(seen));
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for (i = 0; c->cache[i]; i++) {
		/* One bit per bucket on this cache device */
		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
		seen[i] = kmalloc(n, GFP_KERNEL);
		if (!seen[i])
			goto err;

		/* Disables the seen array until prio_read() uses it too */
		memset(seen[i], 0xFF, n);
	}

	ret = btree_root(check_recurse, c, &op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
		kfree(seen[i]);
	return ret;
}
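
/*
 * Illustrative arithmetic, not part of the original file: the seen[]
 * bitmaps above allocate one bit per bucket, rounded up to whole bytes.
 * For a cache device with, say, 1 << 20 buckets that is
 * DIV_ROUND_UP(1048576, 8) = 131072 bytes (128 KiB) per device.
 */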

/* Btree insertion */

static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
{
	struct bset *i = b->sets[b->nsets].data;

	/* Open a gap of bkey_u64s(insert) u64s at @where... */
	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) end(i) - (void *) where);

	/* ...then drop the new key into it */
	i->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, where);
}
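
/*
 * Illustrative diagram, not part of the original file. With three keys
 * already in the unwritten bset and @where pointing at k2, shift_keys()
 * turns
 *
 *	| k1 | k2 | k3 |
 *
 * into
 *
 *	| k1 | insert | k2 | k3 |
 *
 * by memmove()ing everything from @where to end(i) up by
 * bkey_u64s(insert) u64s before copying the new key in.
 */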

static bool fix_overlapping_extents(struct btree *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct btree_op *op)
{
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     offset, -sectors);
	}

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k ||
		    bkey_cmp(&START_KEY(k), insert) >= 0)
			break;

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for BTREE_REPLACE
		 * operations.
		 */

		if (op->type == BTREE_REPLACE &&
		    KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(&op->replace);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(&op->replace) ||
			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(&op->replace));

			for (i = 0; i < KEY_PTRS(&op->replace); i++)
				if (k->ptr[i] != op->replace.ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, &b->sets[b->nsets],
						      insert);
				shift_keys(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				shift_keys(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			return false;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (op->type == BTREE_REPLACE) {
		if (!sectors_found) {
			op->insert_collision = true;
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

	return false;
}
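
/*
 * Illustrative summary, not part of the original file: for an existing
 * extent k (extents compare by end offset; START_KEY() gives the start),
 * fix_overlapping_extents() handles four shapes of overlap with the key
 * being inserted:
 *
 *	k:                   |--------k--------|
 *	insert in middle:         |--ins--|		-> split k around insert
 *	insert over front:  |----ins----|		-> bch_cut_front(insert, k)
 *	insert over back:              |----ins----|	-> __bch_cut_back(&START_KEY(insert), k)
 *	insert covers k:    |---------ins----------|	-> k shrinks to size 0
 */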

static bool btree_insert_key(struct btree *b, struct btree_op *op,
			     struct bkey *k)
{
	struct bset *i = b->sets[b->nsets].data;
	struct bkey *m, *prev;
	unsigned status = BTREE_INSERT_STATUS_INSERT;

	BUG_ON(bkey_cmp(k, &b->key) > 0);
	BUG_ON(b->level && !KEY_PTRS(k));
	BUG_ON(!b->level && !KEY_OFFSET(k));

	if (!b->level) {
		struct btree_iter iter;
		struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);

		/*
		 * bset_search() returns the first key that is strictly greater
		 * than the search key - but for back merging, we want to find
		 * the first key that is greater than or equal to KEY_START(k) -
		 * unless KEY_START(k) is 0.
		 */
		if (KEY_OFFSET(&search))
			SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);

		prev = NULL;
		m = bch_btree_iter_init(b, &iter, &search);

		if (fix_overlapping_extents(b, k, &iter, op))
			return false;

		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     KEY_START(k), KEY_SIZE(k));

		while (m != end(i) &&
		       bkey_cmp(k, &START_KEY(m)) > 0)
			prev = m, m = bkey_next(m);

		if (key_merging_disabled(b->c))
			goto insert;

		/* prev is in the tree, if we merge we're done */
		status = BTREE_INSERT_STATUS_BACK_MERGE;
		if (prev &&
		    bch_bkey_try_merge(b, prev, k))
			goto merged;

		status = BTREE_INSERT_STATUS_OVERWROTE;
		if (m != end(i) &&
		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
			goto copy;

		status = BTREE_INSERT_STATUS_FRONT_MERGE;
		if (m != end(i) &&
		    bch_bkey_try_merge(b, k, m))
			goto copy;
	} else
		m = bch_bset_search(b, &b->sets[b->nsets], k);

insert:	shift_keys(b, m, k);
copy:	bkey_copy(m, k);
merged:
	bch_check_keys(b, "%u for %s", status, op_type(op));

	if (b->level && !KEY_OFFSET(k))
		btree_current_write(b)->prio_blocked++;

	trace_bcache_btree_insert_key(b, k, op->type, status);

	return true;
}
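
/*
 * Worked example, not part of the original file: inserting the extent
 * k = 5:110 with KEY_SIZE(k) == 10 (so KEY_START(k) == 100), the search
 * key starts out as 5:100 and is decremented to 5:99. bset_search()
 * then returns the first key strictly greater than 5:99, i.e. any
 * existing extent ending exactly at 100 - which is precisely the
 * candidate prev for a back merge with k.
 */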

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys)
{
	bool ret = false;
	unsigned oldsize = bch_count_data(b);

	while (!bch_keylist_empty(insert_keys)) {
		struct bset *i = write_block(b);
		struct bkey *k = insert_keys->keys;

		/* Stop if this key wouldn't fit in the unwritten part of
		 * the node */
		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
		    > btree_blocks(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			bkey_put(b->c, k, b->level);

			ret |= btree_insert_key(b, op, k);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
#if 0
			if (op->type == BTREE_REPLACE) {
				bkey_put(b->c, k, b->level);
				bch_keylist_pop_front(insert_keys);
				op->insert_collision = true;
				break;
			}
#endif
			/* Key straddles the end of this node: insert the
			 * part that fits, leave the rest on the keylist */
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, op, &temp.key);
			break;
		} else {
			break;
		}
	}

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(b) < oldsize);
	return ret;
}

static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct keylist *parent_keys)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();

	n1 = btree_node_alloc_replacement(b);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;

		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);

		n2 = bch_btree_node_alloc(b->c, b->level);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, b->level + 1);
			if (IS_ERR(n3))
				goto err_free2;
		}

		bch_btree_insert_keys(n1, op, insert_keys);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (n1->sets[0].data->keys * 3) / 5)
			keys += bkey_u64s(node(n1->sets[0].data, keys));

		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
		keys += bkey_u64s(node(n1->sets[0].data, keys));

		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
		n1->sets[0].data->keys = keys;

		memcpy(n2->sets[0].data->start,
		       end(n1->sets[0].data),
		       n2->sets[0].data->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(parent_keys, &n2->key);
		bch_btree_node_write(n2, &op->cl);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);

		bch_btree_insert_keys(n1, op, insert_keys);
	}

	bch_keylist_add(parent_keys, &n1->key);
	bch_btree_node_write(n1, &op->cl);

	if (n3) {
		/* Depth increases, make a new root */

		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, parent_keys);
		bch_btree_node_write(n3, &op->cl);

		closure_sync(&op->cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */

		bch_keylist_reset(parent_keys);
		closure_sync(&op->cl);
		bch_btree_set_root(n1);
	} else {
		unsigned i;

		/* A "freeing key" for the node we're replacing: a copy of
		 * b's key at 0:0 with the pointer gens bumped */
		bkey_copy(parent_keys->top, &b->key);
		bkey_copy_key(parent_keys->top, &ZERO_KEY);

		for (i = 0; i < KEY_PTRS(&b->key); i++) {
			uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;

			SET_PTR_GEN(parent_keys->top, i, g);
		}

		bch_keylist_push(parent_keys);
		closure_sync(&op->cl);
		atomic_inc(&b->c->prio_blocked);
	}

	rw_unlock(true, n1);
	btree_node_free(b);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	__bkey_put(n2->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	__bkey_put(n1->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	pr_warn("couldn't split");
	return -ENOMEM;
}
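
/*
 * Worked example, not part of the original file: a node is only split
 * when the compacted replacement (n1) would still fill more than 4/5 of
 * btree_blocks(b); otherwise the keys are simply rewritten into n1.
 * When it does split and n1->sets[0].data->keys == 100 u64s of key
 * material, the linear search stops at the first whole-key boundary at
 * or past u64 60 (3/5); that key becomes n1's bounding key and the last
 * key kept in n1, and the remaining u64s are memcpy()d into n2.
 */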

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref)
{
	int ret = 0;
	struct keylist split_keys;

	bch_keylist_init(&split_keys);

	BUG_ON(b->level);

	do {
		if (should_split(b)) {
			if (current->bio_list) {
				/*
				 * We're running under generic_make_request();
				 * blocking on a node split here could
				 * deadlock, so punt back to the caller
				 */
				op->lock = b->c->root->level + 1;
				ret = -EAGAIN;
			} else if (op->lock <= b->c->root->level) {
				/* Retry from the root with a write lock */
				op->lock = b->c->root->level + 1;
				ret = -EINTR;
			} else {
				struct btree *parent = b->parent;

				ret = btree_split(b, op, insert_keys,
						  &split_keys);
				insert_keys = &split_keys;
				b = parent;
				if (!ret)
					ret = -EINTR;
			}
		} else {
			BUG_ON(write_block(b) != b->sets[b->nsets].data);

			if (bch_btree_insert_keys(b, op, insert_keys)) {
				if (!b->level)
					bch_btree_leaf_dirty(b, journal_ref);
				else
					bch_btree_node_write(b, &op->cl);
			}
		}
	} while (!bch_keylist_empty(&split_keys));

	return ret;
}
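
/*
 * Illustrative note, not part of the original file: the return codes
 * above form a small retry protocol. -EINTR tells the btree()/
 * btree_root() traversal macros to drop locks and retry from the root
 * (op->lock has been raised so the retry takes the root write lock);
 * -EAGAIN propagates all the way up to bch_btree_insert(), which
 * closure_sync()s and retries the whole traversal.
 */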

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		/*
		 * rw_lock() bumps b->seq when taking the write lock;
		 * anything other than seq + 1 means someone else write
		 * locked (and may have modified or freed) the node while
		 * we had it unlocked
		 */
		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1)
			goto out;
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	BUG_ON(op->type != BTREE_INSERT);

	ret = bch_btree_insert_node(b, op, &insert, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
				    struct keylist *keys, atomic_t *journal_ref)
{
	if (bch_keylist_empty(keys))
		return 0;

	if (b->level) {
		struct bkey *k;

		k = bch_next_recurse_key(b, &START_KEY(keys->keys));
		if (!k) {
			btree_bug(b, "no key to recurse on at level %i/%i",
				  b->level, b->c->root->level);

			bch_keylist_reset(keys);
			return -EIO;
		}

		return btree(insert_recurse, k, b, op, keys, journal_ref);
	} else {
		return bch_btree_insert_node(b, op, keys, journal_ref);
	}
}

int bch_btree_insert(struct btree_op *op, struct cache_set *c,
		     struct keylist *keys, atomic_t *journal_ref)
{
	int ret = 0;

	/*
	 * Don't want to block with the btree locked unless we have to,
	 * otherwise we get deadlocks with try_harder and between split/gc
	 */
	clear_closure_blocking(&op->cl);

	BUG_ON(bch_keylist_empty(keys));

	while (!bch_keylist_empty(keys)) {
		op->lock = 0;
		ret = btree_root(insert_recurse, c, op, keys, journal_ref);

		if (ret == -EAGAIN) {
			ret = 0;
			closure_sync(&op->cl);
		} else if (ret) {
			struct bkey *k;

			pr_err("error %i trying to insert key for %s",
			       ret, op_type(op));

			while ((k = bch_keylist_pop(keys)))
				bkey_put(c, k, 0);
		}
	}

	return ret;
}
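
/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * builds a keylist, initializes a stack btree_op and hands both to
 * bch_btree_insert(). journal_ref is NULL when the keys aren't pinned
 * by a journal entry, as in bch_btree_insert_check_key() above; the
 * function name here is made up.
 */
#if 0
static int example_insert(struct cache_set *c, struct bkey *k)
{
	struct btree_op op;
	struct keylist keys;

	bch_btree_op_init_stack(&op);
	bch_keylist_init(&keys);

	bkey_copy(keys.top, k);
	bch_keylist_push(&keys);

	return bch_btree_insert(&op, c, &keys, NULL);
}
#endif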

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;
	__bkey_put(b->c, &b->key);

	/* Journal the new root so it's persistent across a crash */
	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(b, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, b,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
	if (closure_blocking(&op->cl))
		closure_sync(&op->cl);
	return ret;
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
	if (closure_blocking(&op->cl))
		closure_sync(&op->cl);
	return ret;
}
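
/*
 * Illustrative sketch, not part of the original file: a map function
 * embeds its btree_op in a context struct and recovers it with
 * container_of(), returning MAP_CONTINUE or MAP_DONE to drive the walk.
 * refill_keybuf_fn() below is the real in-file example; this one just
 * counts keys for a given inode (all names here are made up).
 */
#if 0
struct key_count {
	struct btree_op	op;
	unsigned	inode;
	size_t		count;
};

static int count_keys_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct key_count *ctx = container_of(op, struct key_count, op);

	if (KEY_INODE(k) == ctx->inode)
		ctx->count++;

	return MAP_CONTINUE;
}
#endif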

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}
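
/*
 * Worked example, not part of the original file: with extents written as
 * [start, end) in sectors, keybuf_cmp() treats l = [0, 10) and
 * r = [5, 15) as equal because they overlap, so RB_INSERT() with
 * keybuf_cmp rejects the second of any two overlapping keys - the tree
 * only ever holds disjoint extents. keybuf_nonoverlapping_cmp() then
 * gives a total order over those disjoint extents by end offset alone.
 */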

struct refill {
	struct btree_op	op;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init_stack(&refill.op);
	refill.buf = buf;
	refill.end = end;
	refill.pred = pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
		 RB_EMPTY_ROOT(&buf->keys) ? "no" :
		 array_freelist_empty(&buf->freelist) ? "some" : "a few",
		 KEY_INODE(&start), KEY_OFFSET(&start),
		 KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}
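
/*
 * Illustrative note, not part of the original file: this keeps the
 * keybuf coherent with foreground writes - cached candidate keys
 * overlapping [start, end) are dropped, and a true return means some
 * overlapping key is currently claimed (w->private set), so the caller
 * knows an io against that range is already in flight.
 */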

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	/* Claim the key so other users skip it */
	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}