/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted;
 * rather, as keys are inserted we only sort the pages that have not yet been
 * written. When garbage collection is run, we re-sort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
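
/*
 * A note on stale pointers: a pointer embeds the generation of the bucket it
 * points into, so detecting staleness is just a generation comparison -
 * roughly, as ptr_stale() in bcache.h computes it:
 *
 *	gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i))
 */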

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Make sure all allocations get charged to the root cgroup
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Also lookup by cgroup in get_open_bucket()
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

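/*
 * True if the node must be split before inserting: it is already full, or
 * writing out the current in-memory bset (with ~15 keys of slack for the
 * pending insert) would not fit in the blocks remaining.
 */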
static inline bool should_split(struct btree *b)
{
	struct bset *i = write_block(b);
	return b->written >= btree_blocks(b) ||
	       (b->written + __set_blocks(i, i->keys + 15, b->c)
		> btree_blocks(b));
}

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
	if (!IS_ERR(_child)) {						\
		_child->parent = (b);					\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})
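
/*
 * Usage sketch: the wrapped function must be named bch_btree_<fn>; a
 * recursive helper such as bch_btree_map_nodes_recurse() descends with
 *
 *	ret = btree(map_nodes_recurse, k, b, op, from, fn, flags);
 *
 * which looks up the child node for k one level down, takes the appropriate
 * lock, calls the helper on the child and unlocks it when done.
 */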

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_b->parent = NULL;				\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		if (_r == -EINTR)					\
			schedule();					\
		bch_cannibalize_unlock(c);				\
		if (_r == -ENOSPC) {					\
			wait_event((c)->try_wait,			\
				   !(c)->try_harder);			\
			_r = -EINTR;					\
		}							\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->bucket_wait, &(op)->wait);			\
	_r;								\
})

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

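/*
 * Checksum a bset, seeded with the node's first pointer so the checksum is
 * tied to this node's location on disk; the bset's first 8 bytes (the csum
 * field itself) are skipped, and the result is inverted.
 */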
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

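/*
 * Validate and assemble the bsets read off disk for this node: walk each
 * contiguous on-disk bset (they share the first bset's sequence number),
 * checking version, size, magic and checksum, push each one onto an
 * iterator, then sort them all into a single in-memory set.
 */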
void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = b->sets[0].data;
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->sets[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, b->c);
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(b, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->sets[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(b, iter, &b->c->sort);

	i = b->sets[0].data;
	err = "short btree key";
	if (b->sets[0].size &&
	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw	= REQ_META|READ_SYNC;
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;

	bch_bio_map(bio, b->sets[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct bio_vec *bv;
	int n;

	bio_for_each_segment_all(bv, b->bio, n)
		__free_page(bv->bv_page);

	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = b->sets[b->nsets].data;
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
	b->bio->bi_iter.bi_size	= set_blocks(i, b->c) * block_bytes(b->c);
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

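	/*
	 * Preferred path: write out of a bounce buffer, copying the bset into
	 * freshly allocated pages, so the write completes asynchronously while
	 * the in-memory node can keep changing. The fallback below maps the
	 * bset directly and therefore must wait for the IO to finish.
	 */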
	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

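/*
 * Write out the node's current bset: flip the write index so new inserts go
 * to the other btree_write, and start the IO under b->io_mutex. If @parent is
 * NULL, the write holds a ref on the cache set instead of a caller's closure.
 */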
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = b->sets[b->nsets].data;

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(b->sets->data->seq != i->seq);
	bch_check_keys(b, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	b->written += set_blocks(i, b->c);
	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	/* If not a leaf node, always sort */
	if (b->level && b->nsets)
		bch_btree_sort(b, &b->c->sort);
	else
		bch_btree_sort_lazy(b, &b->c->sort);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (i != b->sets->data && !b->nsets)
		bch_btree_verify(b);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);
	bch_btree_node_write(b, &cl);
	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	rw_lock(true, b, b->level);

	if (btree_node_dirty(b))
		bch_btree_node_write(b, NULL);
	rw_unlock(true, b);
}

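/*
 * Mark a leaf dirty after an insert and pin the journal: the node keeps a
 * reference on the journal entry covering its oldest unwritten keys, so that
 * entry can't be reclaimed until the node is written out.
 */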
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = b->sets[b->nsets].data;
	struct btree_write *w = btree_current_write(b);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

static void mca_reinit(struct btree *b)
{
	unsigned i;

	b->flags	= 0;
	b->written	= 0;
	b->nsets	= 0;

	for (i = 0; i < MAX_BSETS; i++)
		b->sets[i].size = 0;
	/*
	 * Second loop starts at 1 because b->sets[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->sets[i].data = NULL;
}

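/*
 * Reserve of in-memory btree nodes the shrinker must never free: enough to
 * make forward progress on splits at every level of the current tree (8 per
 * level, plus 16 for slack). mca_can_free() is how many cached nodes exist
 * beyond that reserve.
 */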
#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	struct bset_tree *t = b->sets;

	BUG_ON(b->io_mutex.count != 1);

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
	list_move(&b->list, &b->c->btree_cache_freed);
	b->c->bucket_cache_used--;
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	struct bset_tree *t = b->sets;
	BUG_ON(t->data);

	b->page_order = max_t(unsigned,
			      ilog2(b->c->btree_pages),
			      btree_order(k));

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	list_move(&b->list, &b->c->btree_cache);
	b->c->bucket_cache_used++;
	return;
err:
	mca_data_free(b);
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

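/*
 * Try to make a cached node's memory reclaimable. Returns 0 with the node
 * write locked on success; -ENOMEM if the lock can't be taken, the node is
 * smaller than @min_order, or the node is dirty or mid-IO while @flush is
 * false. With @flush set, a dirty node is written out synchronously first.
 */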
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

	if (b->page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	if (btree_node_dirty(b))
		bch_btree_node_write_sync(b);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->try_harder)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->try_harder)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->sets[0].data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (!c->try_harder) {
		c->try_harder = current;
		c->try_harder_start = local_clock();
	} else if (c->try_harder != current)
		return ERR_PTR(-ENOSPC);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->try_harder == current) {
		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
		wake_up(&c->try_wait);
	}
}

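/*
 * Find or build an in-memory node for the on-disk node at @k: reuse a node
 * from the freeable or freed lists if possible, otherwise allocate a fresh
 * one, falling back to cannibalizing another cached node. Returns NULL if
 * the node turns out to already be in the cache, or an ERR_PTR on failure.
 */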
static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->sets->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->level	= level;
	b->parent	= (void *) ~0UL;

	mca_reinit(b);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
				 int level, bool write)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->accessed = 1;

	for (; i <= b->nsets && b->sets[i].size; i++) {
		prefetch(b->sets[i].tree);
		prefetch(b->sets[i].data);
	}

	for (; i <= b->nsets; i++)
		prefetch(b->sets[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	b = mca_alloc(c, k, level);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	unsigned i;

	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));

		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
			    PTR_BUCKET(b->c, &b->key, i));
	}

	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	bch_bset_init_next(b);

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(b);
	return b;
}

static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
	struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
	if (!IS_ERR_OR_NULL(n)) {
		bch_btree_sort_into(b, n, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
	}

	return n;
}

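/*
 * Build the key that signals @b's bucket is being freed: a copy of the
 * node's key with a zeroed search key and each pointer generation bumped to
 * the value the bucket will have once freed, so stale copies of the old
 * pointer fail the generation check. prio_blocked is bumped to keep the
 * bucket from being reused before the new generation is persisted.
 */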
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++) {
		uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;

		SET_PTR_GEN(k, i, g);
	}

	atomic_inc(&b->c->prio_blocked);
}

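/*
 * Check that every cache keeps enough free buckets in the btree reserve for
 * splits all the way up the tree (two nodes per level, plus a new root). If
 * not, put the caller on c->bucket_wait and return -EINTR so the operation
 * is retried from the top of the btree.
 */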
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = c->root->level * 2 + 1;
	int ret = 0;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->bucket_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			ret = -EINTR;
			break;
		}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Garbage collection */

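/*
 * Mark the buckets pointed to by @k for garbage collection: update each
 * bucket's gc generation, flag it as metadata or dirty data, and accumulate
 * sectors used. Returns the staleness of the most stale pointer in @k.
 */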
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
			g->gc_gen = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     (1 << 14) - 1));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

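/*
 * Mark every key in one node and decide whether the node should be
 * rewritten: rewrite if the gc_always_rewrite tunable is set, if pointers
 * have gone badly stale, or if more than half of the node's keys are dead.
 */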
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(b, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(b, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned	keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
				 struct keylist *, atomic_t *, struct bkey *);

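/*
 * Try to coalesce a window of up to GC_MERGE_NODES adjacent nodes into one
 * fewer node: allocate replacements, shift keys between them so that
 * new_nodes[0] ends up empty and is freed, then insert the new keys plus
 * freeing keys for the originals into the parent. Returns -EINTR on success
 * (the caller's iterator is now invalid), 0 if nothing could be coalesced.
 */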
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct keylist *keylist, struct gc_stat *gc,
			     struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct closure cl;
	struct bkey *k;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = new_nodes[i]->sets->data;
		struct bset *n2 = new_nodes[i - 1]->sets->data;
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k), b->c) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 b->c) > btree_blocks(new_nodes[i]))
				goto out_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys,
				    b->c) > btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist->top);
		bch_keylist_push(keylist);
	}

	/* We emptied out this node */
	BUG_ON(new_nodes[0]->sets->data->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);

	closure_sync(&cl);

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	bch_btree_insert_node(b, op, keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(keylist));

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	/* Invalidated our iterator */
	return -EINTR;

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;

	for_each_key_filter(b, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

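/*
 * Garbage collect one level of the btree, keeping a sliding window of the
 * last GC_MERGE_NODES children so adjacent underfull nodes can be coalesced.
 * Nodes that should be rewritten are replaced when the btree reserve allows,
 * and dirty leaves are flushed before gc moves past them (via @writes).
 * Returns -EINTR if the iterator was invalidated and gc must restart from
 * c->gc_done, or -EAGAIN when it needs to reschedule.
 */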
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	unsigned i;
	int ret = 0;
	bool should_rewrite;
	struct btree *n;
	struct bkey *k;
	struct keylist keys;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *last = r + GC_MERGE_NODES - 1;

	bch_keylist_init(&keys);
	bch_btree_iter_init(b, &iter, &b->c->gc_done);

	for (i = 0; i < GC_MERGE_NODES; i++)
		r[i].b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, &keys, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite &&
			    !btree_check_reserve(b, NULL)) {
				n = btree_node_alloc_replacement(last->b,
								 false);

				if (!IS_ERR_OR_NULL(n)) {
					bch_btree_node_write_sync(n);
					bch_keylist_add(&keys, &n->key);

					make_btree_freeing_key(last->b,
							       keys.top);
					bch_keylist_push(&keys);

					btree_node_free(last->b);

					bch_btree_insert_node(b, op, &keys,
							      NULL, NULL);
					BUG_ON(!bch_keylist_empty(&keys));

					rw_unlock(true, last->b);
					last->b = n;

					/* Invalidated our iterator */
					ret = -EINTR;
					break;
				}
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = 0; i < GC_MERGE_NODES; i++)
		if (!IS_ERR_OR_NULL(r[i].b)) {
			if (btree_node_dirty(r[i].b))
				bch_btree_node_write(r[i].b, writes);
			rw_unlock(true, r[i].b);
		}

	bch_keylist_free(&keys);

	return ret;
}

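/*
 * Like btree_gc_recurse(), but for the root: rewrite the root node
 * itself if gc flagged it, then descend into the rest of the tree.
 */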
static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, false);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);
			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

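/*
 * Start of a gc pass: snapshot every bucket's generation in gc_gen and
 * reset the gc marks, so marking starts from a clean slate.
 */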
static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

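/*
 * End of a gc pass: re-mark the metadata buckets (btree root, uuids,
 * buckets listed in each cache's superblock) and buckets pinned by
 * dirty writeback keys, then count and return how many buckets are
 * reclaimable.
 */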
size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc = 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc = b->gc_gen;
			c->need_gc = max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}

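/*
 * One complete gc pass: mark (retrying the root walk until it finishes
 * without -EAGAIN/-EINTR), sweep, update the gc stats and then kick
 * off moving gc.
 */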
static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);

		if (ret && ret != -EAGAIN)
			pr_warn("gc failed!");
	} while (ret);

	available = bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

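/*
 * Main loop of the "bcache_gc" kthread: run a gc pass, then sleep
 * until woken, looping straight back around while any cache still
 * needs buckets invalidated.
 */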
static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;
	struct cache *ca;
	unsigned i;

	while (1) {
again:
		bch_btree_gc(c);

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		mutex_lock(&c->bucket_lock);

		for_each_cache(ca, c, i)
			if (ca->invalidate_needs_gc) {
				mutex_unlock(&c->bucket_lock);
				set_current_state(TASK_RUNNING);
				goto again;
			}

		mutex_unlock(&c->bucket_lock);

		try_to_freeze();
		schedule();
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
	return 0;
}

/* Initial partial gc */

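/*
 * Walks the whole btree when a cache set is registered, re-deriving
 * bucket generations and priorities from the pointers found in it.
 * Child nodes are prefetched one key ahead of the recursion to hide
 * some of the read latency.
 */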
static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
				   unsigned long **seen)
{
	int ret = 0;
	unsigned i;
	struct bkey *k, *p = NULL;
	struct bucket *g;
	struct btree_iter iter;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		for (i = 0; i < KEY_PTRS(k); i++) {
			if (!ptr_available(b->c, k, i))
				continue;

			g = PTR_BUCKET(b->c, k, i);

			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
						seen[PTR_DEV(k, i)]) ||
			    !ptr_stale(b->c, k, i)) {
				g->gen = PTR_GEN(k, i);

				if (b->level)
					g->prio = BTREE_PRIO;
				else if (g->prio == BTREE_PRIO)
					g->prio = INITIAL_PRIO;
			}
		}

		btree_mark_key(b, k);
	}

	if (b->level) {
		bch_btree_iter_init(b, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
			if (k)
				btree_node_prefetch(b->c, k, b->level - 1);

			if (p)
				ret = btree(check_recurse, p, b, op, seen);

			p = k;
		} while (p && !ret);
	}

	return 0;
}

int bch_btree_check(struct cache_set *c)
{
	int ret = -ENOMEM;
	unsigned i;
	unsigned long *seen[MAX_CACHES_PER_SET];
	struct btree_op op;

	memset(seen, 0, sizeof(seen));
	bch_btree_op_init(&op, SHRT_MAX);

	for (i = 0; c->cache[i]; i++) {
		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
		seen[i] = kmalloc(n, GFP_KERNEL);
		if (!seen[i])
			goto err;

		/*
		 * All bits set: disables the seen array until prio_read()
		 * uses it too
		 */
		memset(seen[i], 0xFF, n);
	}

	ret = btree_root(check_recurse, c, &op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
		kfree(seen[i]);
	return ret;
}

/* Btree insertion */

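/*
 * Make room for @insert at @where in the last (unwritten) bset,
 * shifting the keys after it up and fixing the lookup table.
 */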
static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
{
	struct bset *i = b->sets[b->nsets].data;

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(i) - (void *) where);

	i->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, where);
}

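/*
 * Trim and/or split every existing extent that overlaps @insert so
 * that nothing in the node overlaps it. If @replace_key is non NULL
 * this is a cmpxchg style insert: the region being overwritten must
 * still point at @replace_key, and @insert is shrunk to however much
 * of it actually matched. Returns true if a replace found nothing to
 * replace (a collision).
 */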
static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     offset, -sectors);
	}

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, &b->sets[b->nsets],
						      insert);
				shift_keys(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				shift_keys(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			return false;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

	return false;
}

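/*
 * Insert a single key into @b. On leaf nodes this resolves extent
 * overlaps first and then tries to back or front merge with the
 * neighbouring keys; returns false if a replace operation collided.
 */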
static bool btree_insert_key(struct btree *b, struct btree_op *op,
			     struct bkey *k, struct bkey *replace_key)
{
	struct bset *i = b->sets[b->nsets].data;
	struct bkey *m, *prev;
	unsigned status = BTREE_INSERT_STATUS_INSERT;

	BUG_ON(bkey_cmp(k, &b->key) > 0);
	BUG_ON(b->level && !KEY_PTRS(k));
	BUG_ON(!b->level && !KEY_OFFSET(k));

	if (!b->level) {
		struct btree_iter iter;

		/*
		 * bset_search() returns the first key that is strictly greater
		 * than the search key - but for back merging, we want to find
		 * the previous key.
		 */
		prev = NULL;
		m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));

		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
			op->insert_collision = true;
			return false;
		}

		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
						     KEY_START(k), KEY_SIZE(k));

		while (m != bset_bkey_last(i) &&
		       bkey_cmp(k, &START_KEY(m)) > 0)
			prev = m, m = bkey_next(m);

		if (key_merging_disabled(b->c))
			goto insert;

		/* prev is in the tree, if we merge we're done */
		status = BTREE_INSERT_STATUS_BACK_MERGE;
		if (prev &&
		    bch_bkey_try_merge(b, prev, k))
			goto merged;

		status = BTREE_INSERT_STATUS_OVERWROTE;
		if (m != bset_bkey_last(i) &&
		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
			goto copy;

		status = BTREE_INSERT_STATUS_FRONT_MERGE;
		if (m != bset_bkey_last(i) &&
		    bch_bkey_try_merge(b, k, m))
			goto copy;
	} else {
		BUG_ON(replace_key);
		m = bch_bset_search(b, &b->sets[b->nsets], k);
	}

insert:	shift_keys(b, m, k);
copy:	bkey_copy(m, k);
merged:
	bch_check_keys(b, "%u for %s", status,
		       replace_key ? "replace" : "insert");

	if (b->level && !KEY_OFFSET(k))
		btree_current_write(b)->prio_blocked++;

	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);

	return true;
}

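/*
 * Insert as many keys from @insert_keys as fit in @b's unwritten bset.
 * A key that straddles b->key is split, so the part belonging to the
 * next node stays on the keylist. Returns true if anything was
 * inserted.
 */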
static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(b);

	while (!bch_keylist_empty(insert_keys)) {
		struct bset *i = write_block(b);
		struct bkey *k = insert_keys->keys;

		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
		    > btree_blocks(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, op, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, op, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(b) < oldsize);
	return ret;
}

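/*
 * Rewrite @b with @insert_keys added, splitting into two nodes if the
 * result would be more than ~4/5 full and growing the tree by a level
 * when the root itself splits. New nodes are written out before the
 * keys pointing at them go into the parent, so a crash can't leave
 * pointers to unwritten nodes.
 */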
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (!b->level &&
	    btree_check_reserve(b, op))
		return -EINTR;

	n1 = btree_node_alloc_replacement(b, true);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;

		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);

		n2 = bch_btree_node_alloc(b->c, b->level, true);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
			if (IS_ERR(n3))
				goto err_free2;
		}

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (n1->sets[0].data->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data,
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(n1->sets[0].data, keys));
		keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data, keys));

		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
		n1->sets[0].data->keys = keys;

		memcpy(n2->sets[0].data->start,
		       bset_bkey_last(n1->sets[0].data),
		       n2->sets[0].data->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);

	if (n3) {
		/* Depth increases, make a new root */
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);

		btree_node_free(b);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);

		btree_node_free(b);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		btree_node_free(b);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed");

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

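/*
 * Insert @insert_keys into @b, splitting first if the node is full.
 * -EAGAIN/-EINTR tell the caller to retry with op->lock taken at a
 * higher level; a successful split also returns -EINTR because it
 * invalidates every iterator into the tree.
 */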
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	BUG_ON(b->level && replace_key);

	if (should_split(b)) {
		if (current->bio_list) {
			op->lock = b->c->root->level + 1;
			return -EAGAIN;
		} else if (op->lock <= b->c->root->level) {
			op->lock = b->c->root->level + 1;
			return -EINTR;
		} else {
			/* Invalidated all iterators */
			return btree_split(b, op, insert_keys, replace_key) ?:
				-EINTR;
		}
	} else {
		BUG_ON(write_block(b) != b->sets[b->nsets].data);

		if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
			if (!b->level)
				bch_btree_leaf_dirty(b, journal_ref);
			else
				bch_btree_node_write_sync(b);
		}

		return 0;
	}
}

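/*
 * Insert a placeholder key whose pointer is random bytes on a fake
 * device (PTR_CHECK_DEV), so that - presumably - a later replace of
 * that key fails if something else modified the region in between.
 * Upgrades the node's read lock to a write lock if needed.
 */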
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1)
			goto out;
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

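/*
 * Top level insert interface: bch_btree_insert() maps
 * btree_insert_fn() over the leaf nodes covering @keys until the
 * keylist is drained. On error the remaining keys are dropped; a
 * replace that never matched anything returns -ESRCH.
 */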
struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys		= keys;
	op.journal_ref	= journal_ref;
	op.replace_key	= replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

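/*
 * The map functions walk the tree from @from, calling @fn on each
 * node or key until it returns something other than MAP_CONTINUE;
 * refill_keybuf_fn() below is a typical map_keys callback.
 */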
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(b, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, b,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

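/*
 * A keybuf holds an rb tree of keys harvested from the btree by
 * bch_refill_keybuf(); writeback, for one, collects the dirty keys it
 * works on in dc->writeback_keys.
 */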
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

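/*
 * map_keys callback for bch_refill_keybuf(): copy each key matching
 * @pred into the keybuf until we reach @end or run out of freelist
 * slots, tracking in last_scanned where the next refill should resume.
 */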
static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

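/*
 * Scan the btree from buf->last_scanned towards @end, buffering keys
 * that satisfy @pred, then recompute the range of keys the buffer
 * currently covers.
 */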
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

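/*
 * Drop any buffered keys overlapping [start, end) that nobody is
 * working on; returns true if an overlapping key was in use
 * (w->private set) and so couldn't be dropped.
 */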
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}