blob: 6734e2759b9373359e48426c671449e5ff858104 [file] [log] [blame]
Kent Overstreetcafe5632013-03-23 16:11:31 -07001/*
2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3 *
4 * Uses a block device as cache for other block devices; optimized for SSDs.
5 * All allocation is done in buckets, which should match the erase block size
6 * of the device.
7 *
8 * Buckets containing cached data are kept on a heap sorted by priority;
9 * bucket priority is increased on cache hit, and periodically all the buckets
10 * on the heap have their priority scaled down. This currently is just used as
11 * an LRU but in the future should allow for more intelligent heuristics.
12 *
13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
14 * counter. Garbage collection is used to remove stale pointers.
15 *
16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
17 * as keys are inserted we only sort the pages that have not yet been written.
18 * When garbage collection is run, we resort the entire node.
19 *
20 * All configuration is done via sysfs; see Documentation/bcache.txt.
21 */
22
23#include "bcache.h"
24#include "btree.h"
25#include "debug.h"
Kent Overstreet65d45232013-12-20 17:22:05 -080026#include "extents.h"
Kent Overstreet279afba2013-06-05 06:21:07 -070027#include "writeback.h"
Kent Overstreetcafe5632013-03-23 16:11:31 -070028
29#include <linux/slab.h>
30#include <linux/bitops.h>
Kent Overstreet72a44512013-10-24 17:19:26 -070031#include <linux/freezer.h>
Kent Overstreetcafe5632013-03-23 16:11:31 -070032#include <linux/hash.h>
Kent Overstreet72a44512013-10-24 17:19:26 -070033#include <linux/kthread.h>
Geert Uytterhoevencd953ed2013-03-27 18:56:28 +010034#include <linux/prefetch.h>
Kent Overstreetcafe5632013-03-23 16:11:31 -070035#include <linux/random.h>
36#include <linux/rcupdate.h>
37#include <trace/events/bcache.h>
38
39/*
40 * Todo:
41 * register_bcache: Return errors out to userspace correctly
42 *
43 * Writeback: don't undirty key until after a cache flush
44 *
45 * Create an iterator for key pointers
46 *
47 * On btree write error, mark bucket such that it won't be freed from the cache
48 *
49 * Journalling:
50 * Check for bad keys in replay
51 * Propagate barriers
52 * Refcount journal entries in journal_replay
53 *
54 * Garbage collection:
55 * Finish incremental gc
56 * Gc should free old UUIDs, data for invalid UUIDs
57 *
58 * Provide a way to list backing device UUIDs we have data cached for, and
59 * probably how long it's been since we've seen them, and a way to invalidate
60 * dirty data for devices that will never be attached again
61 *
62 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
63 * that based on that and how much dirty data we have we can keep writeback
64 * from being starved
65 *
66 * Add a tracepoint or somesuch to watch for writeback starvation
67 *
68 * When btree depth > 1 and splitting an interior node, we have to make sure
69 * alloc_bucket() cannot fail. This should be true but is not completely
70 * obvious.
71 *
72 * Make sure all allocations get charged to the root cgroup
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Also lookup by cgroup in get_open_bucket()
80 *
81 * Superblock needs to be fleshed out for multiple cache devices
82 *
83 * Add a sysfs tunable for the number of writeback IOs in flight
84 *
85 * Add a sysfs tunable for the number of open data buckets
86 *
87 * IO tracking: Can we track when one process is doing io on behalf of another?
88 * IO tracking: Don't use just an average, weigh more recent stuff higher
89 *
90 * Test module load/unload
91 */
92
Kent Overstreetdf8e8972013-07-24 17:37:59 -070093enum {
94 BTREE_INSERT_STATUS_INSERT,
95 BTREE_INSERT_STATUS_BACK_MERGE,
96 BTREE_INSERT_STATUS_OVERWROTE,
97 BTREE_INSERT_STATUS_FRONT_MERGE,
98};
99
Kent Overstreetcafe5632013-03-23 16:11:31 -0700100#define MAX_NEED_GC 64
101#define MAX_SAVE_PRIO 72
102
103#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
104
105#define PTR_HASH(c, k) \
106 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
107
Kent Overstreetcafe5632013-03-23 16:11:31 -0700108static struct workqueue_struct *btree_io_wq;
109
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700110static inline bool should_split(struct btree *b)
111{
112 struct bset *i = write_block(b);
113 return b->written >= btree_blocks(b) ||
Kent Overstreetee811282013-12-17 23:49:49 -0800114 (b->written + __set_blocks(i, i->keys + 15, block_bytes(b->c))
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700115 > btree_blocks(b));
116}
117
118#define insert_lock(s, b) ((b)->level <= (s)->lock)
119
120/*
121 * These macros are for recursing down the btree - they handle the details of
122 * locking and looking up nodes in the cache for you. They're best treated as
123 * mere syntax when reading code that uses them.
124 *
125 * op->lock determines whether we take a read or a write lock at a given depth.
126 * If you've got a read lock and find that you need a write lock (i.e. you're
127 * going to have to split), set op->lock and return -EINTR; btree_root() will
128 * call you again and you'll have the correct lock.
129 */
130
131/**
132 * btree - recurse down the btree on a specified key
133 * @fn: function to call, which will be passed the child node
134 * @key: key to recurse on
135 * @b: parent btree node
136 * @op: pointer to struct btree_op
137 */
138#define btree(fn, key, b, op, ...) \
139({ \
140 int _r, l = (b)->level - 1; \
141 bool _w = l <= (op)->lock; \
142 struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
143 if (!IS_ERR(_child)) { \
144 _child->parent = (b); \
145 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
146 rw_unlock(_w, _child); \
147 } else \
148 _r = PTR_ERR(_child); \
149 _r; \
150})
151
152/**
153 * btree_root - call a function on the root of the btree
154 * @fn: function to call, which will be passed the child node
155 * @c: cache set
156 * @op: pointer to struct btree_op
157 */
158#define btree_root(fn, c, op, ...) \
159({ \
160 int _r = -EINTR; \
161 do { \
162 struct btree *_b = (c)->root; \
163 bool _w = insert_lock(op, _b); \
164 rw_lock(_w, _b, _b->level); \
165 if (_b == (c)->root && \
166 _w == insert_lock(op, _b)) { \
167 _b->parent = NULL; \
168 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
169 } \
170 rw_unlock(_w, _b); \
Kent Overstreet78365412013-12-17 01:29:34 -0800171 if (_r == -EINTR) \
172 schedule(); \
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700173 bch_cannibalize_unlock(c); \
174 if (_r == -ENOSPC) { \
175 wait_event((c)->try_wait, \
176 !(c)->try_harder); \
177 _r = -EINTR; \
178 } \
179 } while (_r == -EINTR); \
180 \
Kent Overstreet78365412013-12-17 01:29:34 -0800181 finish_wait(&(c)->bucket_wait, &(op)->wait); \
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700182 _r; \
183})
184
Kent Overstreetcafe5632013-03-23 16:11:31 -0700185/* Btree key manipulation */
186
Kent Overstreet3a3b6a42013-07-24 16:46:42 -0700187void bkey_put(struct cache_set *c, struct bkey *k)
Kent Overstreete7c590e2013-09-10 18:39:16 -0700188{
189 unsigned i;
190
191 for (i = 0; i < KEY_PTRS(k); i++)
192 if (ptr_available(c, k, i))
193 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
194}
195
Kent Overstreetcafe5632013-03-23 16:11:31 -0700196/* Btree IO */
197
198static uint64_t btree_csum_set(struct btree *b, struct bset *i)
199{
200 uint64_t crc = b->key.ptr[0];
Kent Overstreetfafff812013-12-17 21:56:21 -0800201 void *data = (void *) i + 8, *end = bset_bkey_last(i);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700202
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600203 crc = bch_crc64_update(crc, data, end - data);
Kent Overstreetc19ed232013-03-26 13:49:02 -0700204 return crc ^ 0xffffffffffffffffULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700205}
206
Kent Overstreet78b77bf2013-12-17 22:49:08 -0800207void bch_btree_node_read_done(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700208{
Kent Overstreetcafe5632013-03-23 16:11:31 -0700209 const char *err = "bad btree header";
Kent Overstreetee811282013-12-17 23:49:49 -0800210 struct bset *i = btree_bset_first(b);
Kent Overstreet57943512013-04-25 13:58:35 -0700211 struct btree_iter *iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700212
Kent Overstreet57943512013-04-25 13:58:35 -0700213 iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
214 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700215 iter->used = 0;
216
Kent Overstreet280481d2013-10-24 16:36:03 -0700217#ifdef CONFIG_BCACHE_DEBUG
218 iter->b = b;
219#endif
220
Kent Overstreet57943512013-04-25 13:58:35 -0700221 if (!i->seq)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700222 goto err;
223
224 for (;
225 b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
226 i = write_block(b)) {
227 err = "unsupported bset version";
228 if (i->version > BCACHE_BSET_VERSION)
229 goto err;
230
231 err = "bad btree header";
Kent Overstreetee811282013-12-17 23:49:49 -0800232 if (b->written + set_blocks(i, block_bytes(b->c)) >
233 btree_blocks(b))
Kent Overstreetcafe5632013-03-23 16:11:31 -0700234 goto err;
235
236 err = "bad magic";
Kent Overstreet81ab4192013-10-31 15:46:42 -0700237 if (i->magic != bset_magic(&b->c->sb))
Kent Overstreetcafe5632013-03-23 16:11:31 -0700238 goto err;
239
240 err = "bad checksum";
241 switch (i->version) {
242 case 0:
243 if (i->csum != csum_set(i))
244 goto err;
245 break;
246 case BCACHE_BSET_VERSION:
247 if (i->csum != btree_csum_set(b, i))
248 goto err;
249 break;
250 }
251
252 err = "empty set";
253 if (i != b->sets[0].data && !i->keys)
254 goto err;
255
Kent Overstreetfafff812013-12-17 21:56:21 -0800256 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700257
Kent Overstreetee811282013-12-17 23:49:49 -0800258 b->written += set_blocks(i, block_bytes(b->c));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700259 }
260
261 err = "corrupted btree";
262 for (i = write_block(b);
Kent Overstreet88b9f8c2013-12-17 21:46:35 -0800263 bset_sector_offset(b, i) < KEY_SIZE(&b->key);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700264 i = ((void *) i) + block_bytes(b->c))
265 if (i->seq == b->sets[0].data->seq)
266 goto err;
267
Kent Overstreet67539e82013-09-10 22:53:34 -0700268 bch_btree_sort_and_fix_extents(b, iter, &b->c->sort);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700269
270 i = b->sets[0].data;
271 err = "short btree key";
272 if (b->sets[0].size &&
273 bkey_cmp(&b->key, &b->sets[0].end) < 0)
274 goto err;
275
276 if (b->written < btree_blocks(b))
Kent Overstreetee811282013-12-17 23:49:49 -0800277 bch_bset_init_next(b, write_block(b), bset_magic(&b->c->sb));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700278out:
Kent Overstreet57943512013-04-25 13:58:35 -0700279 mempool_free(iter, b->c->fill_iter);
280 return;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700281err:
282 set_btree_node_io_error(b);
Kent Overstreet88b9f8c2013-12-17 21:46:35 -0800283 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
Kent Overstreetcafe5632013-03-23 16:11:31 -0700284 err, PTR_BUCKET_NR(b->c, &b->key, 0),
Kent Overstreet88b9f8c2013-12-17 21:46:35 -0800285 bset_block_offset(b, i), i->keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700286 goto out;
287}
288
Kent Overstreet57943512013-04-25 13:58:35 -0700289static void btree_node_read_endio(struct bio *bio, int error)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700290{
Kent Overstreet57943512013-04-25 13:58:35 -0700291 struct closure *cl = bio->bi_private;
292 closure_put(cl);
293}
Kent Overstreetcafe5632013-03-23 16:11:31 -0700294
Kent Overstreet78b77bf2013-12-17 22:49:08 -0800295static void bch_btree_node_read(struct btree *b)
Kent Overstreet57943512013-04-25 13:58:35 -0700296{
297 uint64_t start_time = local_clock();
298 struct closure cl;
299 struct bio *bio;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700300
Kent Overstreetc37511b2013-04-26 15:39:55 -0700301 trace_bcache_btree_read(b);
302
Kent Overstreet57943512013-04-25 13:58:35 -0700303 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700304
Kent Overstreet57943512013-04-25 13:58:35 -0700305 bio = bch_bbio_alloc(b->c);
306 bio->bi_rw = REQ_META|READ_SYNC;
Kent Overstreet4f024f32013-10-11 15:44:27 -0700307 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
Kent Overstreet57943512013-04-25 13:58:35 -0700308 bio->bi_end_io = btree_node_read_endio;
309 bio->bi_private = &cl;
310
311 bch_bio_map(bio, b->sets[0].data);
312
Kent Overstreet57943512013-04-25 13:58:35 -0700313 bch_submit_bbio(bio, b->c, &b->key, 0);
314 closure_sync(&cl);
315
316 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
317 set_btree_node_io_error(b);
318
319 bch_bbio_free(bio, b->c);
320
321 if (btree_node_io_error(b))
322 goto err;
323
324 bch_btree_node_read_done(b);
Kent Overstreet57943512013-04-25 13:58:35 -0700325 bch_time_stats_update(&b->c->btree_read_time, start_time);
Kent Overstreet57943512013-04-25 13:58:35 -0700326
327 return;
328err:
Geert Uytterhoeven61cbd252013-09-23 23:17:30 -0700329 bch_cache_set_error(b->c, "io error reading bucket %zu",
Kent Overstreet57943512013-04-25 13:58:35 -0700330 PTR_BUCKET_NR(b->c, &b->key, 0));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700331}
332
333static void btree_complete_write(struct btree *b, struct btree_write *w)
334{
335 if (w->prio_blocked &&
336 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
Kent Overstreet119ba0f2013-04-24 19:01:12 -0700337 wake_up_allocators(b->c);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700338
339 if (w->journal) {
340 atomic_dec_bug(w->journal);
341 __closure_wake_up(&b->c->journal.wait);
342 }
343
Kent Overstreetcafe5632013-03-23 16:11:31 -0700344 w->prio_blocked = 0;
345 w->journal = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700346}
347
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800348static void btree_node_write_unlock(struct closure *cl)
349{
350 struct btree *b = container_of(cl, struct btree, io);
351
352 up(&b->io_mutex);
353}
354
Kent Overstreet57943512013-04-25 13:58:35 -0700355static void __btree_node_write_done(struct closure *cl)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700356{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800357 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700358 struct btree_write *w = btree_prev_write(b);
359
360 bch_bbio_free(b->bio, b->c);
361 b->bio = NULL;
362 btree_complete_write(b, w);
363
364 if (btree_node_dirty(b))
365 queue_delayed_work(btree_io_wq, &b->work,
366 msecs_to_jiffies(30000));
367
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800368 closure_return_with_destructor(cl, btree_node_write_unlock);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700369}
370
Kent Overstreet57943512013-04-25 13:58:35 -0700371static void btree_node_write_done(struct closure *cl)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700372{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800373 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700374 struct bio_vec *bv;
375 int n;
376
Kent Overstreet79886132013-11-23 17:19:00 -0800377 bio_for_each_segment_all(bv, b->bio, n)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700378 __free_page(bv->bv_page);
379
Kent Overstreet57943512013-04-25 13:58:35 -0700380 __btree_node_write_done(cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700381}
382
Kent Overstreet57943512013-04-25 13:58:35 -0700383static void btree_node_write_endio(struct bio *bio, int error)
384{
385 struct closure *cl = bio->bi_private;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800386 struct btree *b = container_of(cl, struct btree, io);
Kent Overstreet57943512013-04-25 13:58:35 -0700387
388 if (error)
389 set_btree_node_io_error(b);
390
391 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
392 closure_put(cl);
393}
394
395static void do_btree_node_write(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700396{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800397 struct closure *cl = &b->io;
Kent Overstreetee811282013-12-17 23:49:49 -0800398 struct bset *i = btree_bset_last(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700399 BKEY_PADDED(key) k;
400
401 i->version = BCACHE_BSET_VERSION;
402 i->csum = btree_csum_set(b, i);
403
Kent Overstreet57943512013-04-25 13:58:35 -0700404 BUG_ON(b->bio);
405 b->bio = bch_bbio_alloc(b->c);
406
407 b->bio->bi_end_io = btree_node_write_endio;
Kent Overstreetfaadf0c2013-11-01 18:03:08 -0700408 b->bio->bi_private = cl;
Kent Overstreete49c7c32013-06-26 17:25:38 -0700409 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
Kent Overstreetee811282013-12-17 23:49:49 -0800410 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600411 bch_bio_map(b->bio, i);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700412
Kent Overstreete49c7c32013-06-26 17:25:38 -0700413 /*
414 * If we're appending to a leaf node, we don't technically need FUA -
415 * this write just needs to be persisted before the next journal write,
416 * which will be marked FLUSH|FUA.
417 *
418 * Similarly if we're writing a new btree root - the pointer is going to
419 * be in the next journal entry.
420 *
421 * But if we're writing a new btree node (that isn't a root) or
422 * appending to a non leaf btree node, we need either FUA or a flush
423 * when we write the parent with the new pointer. FUA is cheaper than a
424 * flush, and writes appending to leaf nodes aren't blocking anything so
425 * just make all btree node writes FUA to keep things sane.
426 */
427
Kent Overstreetcafe5632013-03-23 16:11:31 -0700428 bkey_copy(&k.key, &b->key);
Kent Overstreetee811282013-12-17 23:49:49 -0800429 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
430 bset_sector_offset(b, i));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700431
Kent Overstreet8e51e412013-06-06 18:15:57 -0700432 if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700433 int j;
434 struct bio_vec *bv;
435 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
436
Kent Overstreet79886132013-11-23 17:19:00 -0800437 bio_for_each_segment_all(bv, b->bio, j)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700438 memcpy(page_address(bv->bv_page),
439 base + j * PAGE_SIZE, PAGE_SIZE);
440
Kent Overstreetcafe5632013-03-23 16:11:31 -0700441 bch_submit_bbio(b->bio, b->c, &k.key, 0);
442
Kent Overstreet57943512013-04-25 13:58:35 -0700443 continue_at(cl, btree_node_write_done, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700444 } else {
445 b->bio->bi_vcnt = 0;
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600446 bch_bio_map(b->bio, i);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700447
Kent Overstreetcafe5632013-03-23 16:11:31 -0700448 bch_submit_bbio(b->bio, b->c, &k.key, 0);
449
450 closure_sync(cl);
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800451 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700452 }
453}
454
Kent Overstreet57943512013-04-25 13:58:35 -0700455void bch_btree_node_write(struct btree *b, struct closure *parent)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700456{
Kent Overstreetee811282013-12-17 23:49:49 -0800457 struct bset *i = btree_bset_last(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700458
Kent Overstreetc37511b2013-04-26 15:39:55 -0700459 trace_bcache_btree_write(b);
460
Kent Overstreetcafe5632013-03-23 16:11:31 -0700461 BUG_ON(current->bio_list);
Kent Overstreet57943512013-04-25 13:58:35 -0700462 BUG_ON(b->written >= btree_blocks(b));
463 BUG_ON(b->written && !i->keys);
Kent Overstreetee811282013-12-17 23:49:49 -0800464 BUG_ON(btree_bset_first(b)->seq != i->seq);
Kent Overstreet280481d2013-10-24 16:36:03 -0700465 bch_check_keys(b, "writing");
Kent Overstreetcafe5632013-03-23 16:11:31 -0700466
Kent Overstreetcafe5632013-03-23 16:11:31 -0700467 cancel_delayed_work(&b->work);
468
Kent Overstreet57943512013-04-25 13:58:35 -0700469 /* If caller isn't waiting for write, parent refcount is cache set */
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800470 down(&b->io_mutex);
471 closure_init(&b->io, parent ?: &b->c->cl);
Kent Overstreet57943512013-04-25 13:58:35 -0700472
Kent Overstreetcafe5632013-03-23 16:11:31 -0700473 clear_bit(BTREE_NODE_dirty, &b->flags);
474 change_bit(BTREE_NODE_write_idx, &b->flags);
475
Kent Overstreet57943512013-04-25 13:58:35 -0700476 do_btree_node_write(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700477
Kent Overstreetee811282013-12-17 23:49:49 -0800478 b->written += set_blocks(i, block_bytes(b->c));
479 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
Kent Overstreetcafe5632013-03-23 16:11:31 -0700480 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
481
Kent Overstreet67539e82013-09-10 22:53:34 -0700482 /* If not a leaf node, always sort */
483 if (b->level && b->nsets)
484 bch_btree_sort(b, &b->c->sort);
485 else
486 bch_btree_sort_lazy(b, &b->c->sort);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700487
Kent Overstreet78b77bf2013-12-17 22:49:08 -0800488 /*
489 * do verify if there was more than one set initially (i.e. we did a
490 * sort) and we sorted down to a single set:
491 */
492 if (i != b->sets->data && !b->nsets)
493 bch_btree_verify(b);
494
Kent Overstreetcafe5632013-03-23 16:11:31 -0700495 if (b->written < btree_blocks(b))
Kent Overstreetee811282013-12-17 23:49:49 -0800496 bch_bset_init_next(b, write_block(b), bset_magic(&b->c->sb));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700497}
498
Kent Overstreetf269af52013-07-23 20:48:29 -0700499static void bch_btree_node_write_sync(struct btree *b)
500{
501 struct closure cl;
502
503 closure_init_stack(&cl);
504 bch_btree_node_write(b, &cl);
505 closure_sync(&cl);
506}
507
Kent Overstreet57943512013-04-25 13:58:35 -0700508static void btree_node_write_work(struct work_struct *w)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700509{
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
Kent Overstreet57943512013-04-25 13:58:35 -0700512 rw_lock(true, b, b->level);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700513
514 if (btree_node_dirty(b))
Kent Overstreet57943512013-04-25 13:58:35 -0700515 bch_btree_node_write(b, NULL);
516 rw_unlock(true, b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700517}
518
Kent Overstreetc18536a2013-07-24 17:44:17 -0700519static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700520{
Kent Overstreetee811282013-12-17 23:49:49 -0800521 struct bset *i = btree_bset_last(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700522 struct btree_write *w = btree_current_write(b);
523
Kent Overstreet57943512013-04-25 13:58:35 -0700524 BUG_ON(!b->written);
525 BUG_ON(!i->keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700526
Kent Overstreet57943512013-04-25 13:58:35 -0700527 if (!btree_node_dirty(b))
528 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700529
Kent Overstreet57943512013-04-25 13:58:35 -0700530 set_btree_node_dirty(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700531
Kent Overstreetc18536a2013-07-24 17:44:17 -0700532 if (journal_ref) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700533 if (w->journal &&
Kent Overstreetc18536a2013-07-24 17:44:17 -0700534 journal_pin_cmp(b->c, w->journal, journal_ref)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700535 atomic_dec_bug(w->journal);
536 w->journal = NULL;
537 }
538
539 if (!w->journal) {
Kent Overstreetc18536a2013-07-24 17:44:17 -0700540 w->journal = journal_ref;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700541 atomic_inc(w->journal);
542 }
543 }
544
Kent Overstreetcafe5632013-03-23 16:11:31 -0700545 /* Force write if set is too big */
Kent Overstreet57943512013-04-25 13:58:35 -0700546 if (set_bytes(i) > PAGE_SIZE - 48 &&
547 !current->bio_list)
548 bch_btree_node_write(b, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700549}
550
551/*
552 * Btree in memory cache - allocation/freeing
553 * mca -> memory cache
554 */
555
556static void mca_reinit(struct btree *b)
557{
558 unsigned i;
559
560 b->flags = 0;
561 b->written = 0;
562 b->nsets = 0;
563
564 for (i = 0; i < MAX_BSETS; i++)
565 b->sets[i].size = 0;
566 /*
567 * Second loop starts at 1 because b->sets[0]->data is the memory we
568 * allocated
569 */
570 for (i = 1; i < MAX_BSETS; i++)
571 b->sets[i].data = NULL;
572}
573
574#define mca_reserve(c) (((c->root && c->root->level) \
575 ? c->root->level : 1) * 8 + 16)
576#define mca_can_free(c) \
577 max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
578
579static void mca_data_free(struct btree *b)
580{
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800581 BUG_ON(b->io_mutex.count != 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700582
Kent Overstreetee811282013-12-17 23:49:49 -0800583 bch_btree_keys_free(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700584
Kent Overstreetcafe5632013-03-23 16:11:31 -0700585 b->c->bucket_cache_used--;
Kent Overstreetee811282013-12-17 23:49:49 -0800586 list_move(&b->list, &b->c->btree_cache_freed);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700587}
588
589static void mca_bucket_free(struct btree *b)
590{
591 BUG_ON(btree_node_dirty(b));
592
593 b->key.ptr[0] = 0;
594 hlist_del_init_rcu(&b->hash);
595 list_move(&b->list, &b->c->btree_cache_freeable);
596}
597
598static unsigned btree_order(struct bkey *k)
599{
600 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
601}
602
603static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
604{
Kent Overstreetee811282013-12-17 23:49:49 -0800605 if (!bch_btree_keys_alloc(b,
606 max_t(unsigned,
607 ilog2(b->c->btree_pages),
608 btree_order(k)),
609 gfp)) {
610 b->c->bucket_cache_used++;
611 list_move(&b->list, &b->c->btree_cache);
612 } else {
613 list_move(&b->list, &b->c->btree_cache_freed);
614 }
Kent Overstreetcafe5632013-03-23 16:11:31 -0700615}
616
617static struct btree *mca_bucket_alloc(struct cache_set *c,
618 struct bkey *k, gfp_t gfp)
619{
620 struct btree *b = kzalloc(sizeof(struct btree), gfp);
621 if (!b)
622 return NULL;
623
624 init_rwsem(&b->lock);
625 lockdep_set_novalidate_class(&b->lock);
626 INIT_LIST_HEAD(&b->list);
Kent Overstreet57943512013-04-25 13:58:35 -0700627 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700628 b->c = c;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800629 sema_init(&b->io_mutex, 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700630
631 mca_data_alloc(b, k, gfp);
632 return b;
633}
634
Kent Overstreete8e1d462013-07-24 17:27:07 -0700635static int mca_reap(struct btree *b, unsigned min_order, bool flush)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700636{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700637 struct closure cl;
638
639 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700640 lockdep_assert_held(&b->c->bucket_lock);
641
642 if (!down_write_trylock(&b->lock))
643 return -ENOMEM;
644
Kent Overstreete8e1d462013-07-24 17:27:07 -0700645 BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
646
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800647 if (b->page_order < min_order)
648 goto out_unlock;
649
650 if (!flush) {
651 if (btree_node_dirty(b))
652 goto out_unlock;
653
654 if (down_trylock(&b->io_mutex))
655 goto out_unlock;
656 up(&b->io_mutex);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700657 }
658
Kent Overstreetf269af52013-07-23 20:48:29 -0700659 if (btree_node_dirty(b))
660 bch_btree_node_write_sync(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700661
Kent Overstreete8e1d462013-07-24 17:27:07 -0700662 /* wait for any in flight btree write */
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800663 down(&b->io_mutex);
664 up(&b->io_mutex);
Kent Overstreete8e1d462013-07-24 17:27:07 -0700665
Kent Overstreetcafe5632013-03-23 16:11:31 -0700666 return 0;
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800667out_unlock:
668 rw_unlock(true, b);
669 return -ENOMEM;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700670}
671
Dave Chinner7dc19d52013-08-28 10:18:11 +1000672static unsigned long bch_mca_scan(struct shrinker *shrink,
673 struct shrink_control *sc)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700674{
675 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
676 struct btree *b, *t;
677 unsigned long i, nr = sc->nr_to_scan;
Dave Chinner7dc19d52013-08-28 10:18:11 +1000678 unsigned long freed = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700679
680 if (c->shrinker_disabled)
Dave Chinner7dc19d52013-08-28 10:18:11 +1000681 return SHRINK_STOP;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700682
683 if (c->try_harder)
Dave Chinner7dc19d52013-08-28 10:18:11 +1000684 return SHRINK_STOP;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700685
686 /* Return -1 if we can't do anything right now */
Kent Overstreeta698e082013-09-23 23:17:34 -0700687 if (sc->gfp_mask & __GFP_IO)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700688 mutex_lock(&c->bucket_lock);
689 else if (!mutex_trylock(&c->bucket_lock))
690 return -1;
691
Kent Overstreet36c9ea92013-06-03 13:04:56 -0700692 /*
693 * It's _really_ critical that we don't free too many btree nodes - we
694 * have to always leave ourselves a reserve. The reserve is how we
695 * guarantee that allocating memory for a new btree node can always
696 * succeed, so that inserting keys into the btree can always succeed and
697 * IO can always make forward progress:
698 */
Kent Overstreetcafe5632013-03-23 16:11:31 -0700699 nr /= c->btree_pages;
700 nr = min_t(unsigned long, nr, mca_can_free(c));
701
702 i = 0;
703 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
Dave Chinner7dc19d52013-08-28 10:18:11 +1000704 if (freed >= nr)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700705 break;
706
707 if (++i > 3 &&
Kent Overstreete8e1d462013-07-24 17:27:07 -0700708 !mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700709 mca_data_free(b);
710 rw_unlock(true, b);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000711 freed++;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700712 }
713 }
714
Dave Chinner7dc19d52013-08-28 10:18:11 +1000715 for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
Kent Overstreetb0f32a52013-12-10 13:24:26 -0800716 if (list_empty(&c->btree_cache))
717 goto out;
718
Kent Overstreetcafe5632013-03-23 16:11:31 -0700719 b = list_first_entry(&c->btree_cache, struct btree, list);
720 list_rotate_left(&c->btree_cache);
721
722 if (!b->accessed &&
Kent Overstreete8e1d462013-07-24 17:27:07 -0700723 !mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700724 mca_bucket_free(b);
725 mca_data_free(b);
726 rw_unlock(true, b);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000727 freed++;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700728 } else
729 b->accessed = 0;
730 }
731out:
Kent Overstreetcafe5632013-03-23 16:11:31 -0700732 mutex_unlock(&c->bucket_lock);
Dave Chinner7dc19d52013-08-28 10:18:11 +1000733 return freed;
734}
735
736static unsigned long bch_mca_count(struct shrinker *shrink,
737 struct shrink_control *sc)
738{
739 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
740
741 if (c->shrinker_disabled)
742 return 0;
743
744 if (c->try_harder)
745 return 0;
746
747 return mca_can_free(c) * c->btree_pages;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700748}
749
750void bch_btree_cache_free(struct cache_set *c)
751{
752 struct btree *b;
753 struct closure cl;
754 closure_init_stack(&cl);
755
756 if (c->shrink.list.next)
757 unregister_shrinker(&c->shrink);
758
759 mutex_lock(&c->bucket_lock);
760
761#ifdef CONFIG_BCACHE_DEBUG
762 if (c->verify_data)
763 list_move(&c->verify_data->list, &c->btree_cache);
Kent Overstreet78b77bf2013-12-17 22:49:08 -0800764
765 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
Kent Overstreetcafe5632013-03-23 16:11:31 -0700766#endif
767
768 list_splice(&c->btree_cache_freeable,
769 &c->btree_cache);
770
771 while (!list_empty(&c->btree_cache)) {
772 b = list_first_entry(&c->btree_cache, struct btree, list);
773
774 if (btree_node_dirty(b))
775 btree_complete_write(b, btree_current_write(b));
776 clear_bit(BTREE_NODE_dirty, &b->flags);
777
778 mca_data_free(b);
779 }
780
781 while (!list_empty(&c->btree_cache_freed)) {
782 b = list_first_entry(&c->btree_cache_freed,
783 struct btree, list);
784 list_del(&b->list);
785 cancel_delayed_work_sync(&b->work);
786 kfree(b);
787 }
788
789 mutex_unlock(&c->bucket_lock);
790}
791
792int bch_btree_cache_alloc(struct cache_set *c)
793{
794 unsigned i;
795
Kent Overstreetcafe5632013-03-23 16:11:31 -0700796 for (i = 0; i < mca_reserve(c); i++)
Kent Overstreet72a44512013-10-24 17:19:26 -0700797 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
798 return -ENOMEM;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700799
800 list_splice_init(&c->btree_cache,
801 &c->btree_cache_freeable);
802
803#ifdef CONFIG_BCACHE_DEBUG
804 mutex_init(&c->verify_lock);
805
Kent Overstreet78b77bf2013-12-17 22:49:08 -0800806 c->verify_ondisk = (void *)
807 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
808
Kent Overstreetcafe5632013-03-23 16:11:31 -0700809 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
810
811 if (c->verify_data &&
812 c->verify_data->sets[0].data)
813 list_del_init(&c->verify_data->list);
814 else
815 c->verify_data = NULL;
816#endif
817
Dave Chinner7dc19d52013-08-28 10:18:11 +1000818 c->shrink.count_objects = bch_mca_count;
819 c->shrink.scan_objects = bch_mca_scan;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700820 c->shrink.seeks = 4;
821 c->shrink.batch = c->btree_pages * 2;
822 register_shrinker(&c->shrink);
823
824 return 0;
825}
826
827/* Btree in memory cache - hash table */
828
829static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
830{
831 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
832}
833
834static struct btree *mca_find(struct cache_set *c, struct bkey *k)
835{
836 struct btree *b;
837
838 rcu_read_lock();
839 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
840 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
841 goto out;
842 b = NULL;
843out:
844 rcu_read_unlock();
845 return b;
846}
847
Kent Overstreete8e1d462013-07-24 17:27:07 -0700848static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700849{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700850 struct btree *b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700851
Kent Overstreetc37511b2013-04-26 15:39:55 -0700852 trace_bcache_btree_cache_cannibalize(c);
853
Kent Overstreete8e1d462013-07-24 17:27:07 -0700854 if (!c->try_harder) {
855 c->try_harder = current;
856 c->try_harder_start = local_clock();
857 } else if (c->try_harder != current)
858 return ERR_PTR(-ENOSPC);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700859
Kent Overstreete8e1d462013-07-24 17:27:07 -0700860 list_for_each_entry_reverse(b, &c->btree_cache, list)
861 if (!mca_reap(b, btree_order(k), false))
862 return b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700863
Kent Overstreete8e1d462013-07-24 17:27:07 -0700864 list_for_each_entry_reverse(b, &c->btree_cache, list)
865 if (!mca_reap(b, btree_order(k), true))
866 return b;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700867
Kent Overstreete8e1d462013-07-24 17:27:07 -0700868 return ERR_PTR(-ENOMEM);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700869}
870
871/*
872 * We can only have one thread cannibalizing other cached btree nodes at a time,
873 * or we'll deadlock. We use an open coded mutex to ensure that, which a
874 * cannibalize_bucket() will take. This means every time we unlock the root of
875 * the btree, we need to release this lock if we have it held.
876 */
Kent Overstreetdf8e8972013-07-24 17:37:59 -0700877static void bch_cannibalize_unlock(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700878{
Kent Overstreete8e1d462013-07-24 17:27:07 -0700879 if (c->try_harder == current) {
Kent Overstreet169ef1c2013-03-28 12:50:55 -0600880 bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700881 c->try_harder = NULL;
Kent Overstreete8e1d462013-07-24 17:27:07 -0700882 wake_up(&c->try_wait);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700883 }
884}
885
Kent Overstreete8e1d462013-07-24 17:27:07 -0700886static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700887{
888 struct btree *b;
889
Kent Overstreete8e1d462013-07-24 17:27:07 -0700890 BUG_ON(current->bio_list);
891
Kent Overstreetcafe5632013-03-23 16:11:31 -0700892 lockdep_assert_held(&c->bucket_lock);
893
894 if (mca_find(c, k))
895 return NULL;
896
897 /* btree_free() doesn't free memory; it sticks the node on the end of
898 * the list. Check if there's any freed nodes there:
899 */
900 list_for_each_entry(b, &c->btree_cache_freeable, list)
Kent Overstreete8e1d462013-07-24 17:27:07 -0700901 if (!mca_reap(b, btree_order(k), false))
Kent Overstreetcafe5632013-03-23 16:11:31 -0700902 goto out;
903
904 /* We never free struct btree itself, just the memory that holds the on
905 * disk node. Check the freed list before allocating a new one:
906 */
907 list_for_each_entry(b, &c->btree_cache_freed, list)
Kent Overstreete8e1d462013-07-24 17:27:07 -0700908 if (!mca_reap(b, 0, false)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -0700909 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
910 if (!b->sets[0].data)
911 goto err;
912 else
913 goto out;
914 }
915
916 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
917 if (!b)
918 goto err;
919
920 BUG_ON(!down_write_trylock(&b->lock));
921 if (!b->sets->data)
922 goto err;
923out:
Kent Overstreetcb7a5832013-12-16 15:27:25 -0800924 BUG_ON(b->io_mutex.count != 1);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700925
926 bkey_copy(&b->key, k);
927 list_move(&b->list, &c->btree_cache);
928 hlist_del_init_rcu(&b->hash);
929 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
930
931 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
932 b->level = level;
Kent Overstreetd6fd3b12013-07-24 17:20:19 -0700933 b->parent = (void *) ~0UL;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700934
Kent Overstreet65d45232013-12-20 17:22:05 -0800935 if (!b->level)
936 b->ops = &bch_extent_keys_ops;
937 else
938 b->ops = &bch_btree_keys_ops;
939
Kent Overstreetcafe5632013-03-23 16:11:31 -0700940 mca_reinit(b);
941
942 return b;
943err:
944 if (b)
945 rw_unlock(true, b);
946
Kent Overstreete8e1d462013-07-24 17:27:07 -0700947 b = mca_cannibalize(c, k);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700948 if (!IS_ERR(b))
949 goto out;
950
951 return b;
952}
953
954/**
955 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
956 * in from disk if necessary.
957 *
Kent Overstreetb54d6932013-07-24 18:04:18 -0700958 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
Kent Overstreetcafe5632013-03-23 16:11:31 -0700959 *
960 * The btree node will have either a read or a write lock held, depending on
961 * level and op->lock.
962 */
963struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
Kent Overstreete8e1d462013-07-24 17:27:07 -0700964 int level, bool write)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700965{
966 int i = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700967 struct btree *b;
968
969 BUG_ON(level < 0);
970retry:
971 b = mca_find(c, k);
972
973 if (!b) {
Kent Overstreet57943512013-04-25 13:58:35 -0700974 if (current->bio_list)
975 return ERR_PTR(-EAGAIN);
976
Kent Overstreetcafe5632013-03-23 16:11:31 -0700977 mutex_lock(&c->bucket_lock);
Kent Overstreete8e1d462013-07-24 17:27:07 -0700978 b = mca_alloc(c, k, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700979 mutex_unlock(&c->bucket_lock);
980
981 if (!b)
982 goto retry;
983 if (IS_ERR(b))
984 return b;
985
Kent Overstreet57943512013-04-25 13:58:35 -0700986 bch_btree_node_read(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700987
988 if (!write)
989 downgrade_write(&b->lock);
990 } else {
991 rw_lock(write, b, level);
992 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
993 rw_unlock(write, b);
994 goto retry;
995 }
996 BUG_ON(b->level != level);
997 }
998
999 b->accessed = 1;
1000
1001 for (; i <= b->nsets && b->sets[i].size; i++) {
1002 prefetch(b->sets[i].tree);
1003 prefetch(b->sets[i].data);
1004 }
1005
1006 for (; i <= b->nsets; i++)
1007 prefetch(b->sets[i].data);
1008
Kent Overstreet57943512013-04-25 13:58:35 -07001009 if (btree_node_io_error(b)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001010 rw_unlock(write, b);
Kent Overstreet57943512013-04-25 13:58:35 -07001011 return ERR_PTR(-EIO);
1012 }
1013
1014 BUG_ON(!b->written);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001015
1016 return b;
1017}
1018
1019static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
1020{
1021 struct btree *b;
1022
1023 mutex_lock(&c->bucket_lock);
Kent Overstreete8e1d462013-07-24 17:27:07 -07001024 b = mca_alloc(c, k, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001025 mutex_unlock(&c->bucket_lock);
1026
1027 if (!IS_ERR_OR_NULL(b)) {
Kent Overstreet57943512013-04-25 13:58:35 -07001028 bch_btree_node_read(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001029 rw_unlock(true, b);
1030 }
1031}
1032
1033/* Btree alloc */
1034
Kent Overstreete8e1d462013-07-24 17:27:07 -07001035static void btree_node_free(struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001036{
1037 unsigned i;
1038
Kent Overstreetc37511b2013-04-26 15:39:55 -07001039 trace_bcache_btree_node_free(b);
1040
Kent Overstreetcafe5632013-03-23 16:11:31 -07001041 BUG_ON(b == b->c->root);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001042
1043 if (btree_node_dirty(b))
1044 btree_complete_write(b, btree_current_write(b));
1045 clear_bit(BTREE_NODE_dirty, &b->flags);
1046
Kent Overstreetcafe5632013-03-23 16:11:31 -07001047 cancel_delayed_work(&b->work);
1048
1049 mutex_lock(&b->c->bucket_lock);
1050
1051 for (i = 0; i < KEY_PTRS(&b->key); i++) {
1052 BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
1053
1054 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1055 PTR_BUCKET(b->c, &b->key, i));
1056 }
1057
1058 bch_bucket_free(b->c, &b->key);
1059 mca_bucket_free(b);
1060 mutex_unlock(&b->c->bucket_lock);
1061}
1062
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001063struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001064{
1065 BKEY_PADDED(key) k;
1066 struct btree *b = ERR_PTR(-EAGAIN);
1067
1068 mutex_lock(&c->bucket_lock);
1069retry:
Kent Overstreet78365412013-12-17 01:29:34 -08001070 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001071 goto err;
1072
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07001073 bkey_put(c, &k.key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001074 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1075
Kent Overstreete8e1d462013-07-24 17:27:07 -07001076 b = mca_alloc(c, &k.key, level);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001077 if (IS_ERR(b))
1078 goto err_free;
1079
1080 if (!b) {
Kent Overstreetb1a67b02013-03-25 11:46:44 -07001081 cache_bug(c,
1082 "Tried to allocate bucket that was in btree cache");
Kent Overstreetcafe5632013-03-23 16:11:31 -07001083 goto retry;
1084 }
1085
Kent Overstreetcafe5632013-03-23 16:11:31 -07001086 b->accessed = 1;
Kent Overstreetee811282013-12-17 23:49:49 -08001087 bch_bset_init_next(b, b->sets->data, bset_magic(&b->c->sb));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001088
1089 mutex_unlock(&c->bucket_lock);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001090
1091 trace_bcache_btree_node_alloc(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001092 return b;
1093err_free:
1094 bch_bucket_free(c, &k.key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001095err:
1096 mutex_unlock(&c->bucket_lock);
Kent Overstreetc37511b2013-04-26 15:39:55 -07001097
1098 trace_bcache_btree_node_alloc_fail(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001099 return b;
1100}
1101
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001102static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001103{
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001104 struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
Kent Overstreet67539e82013-09-10 22:53:34 -07001105 if (!IS_ERR_OR_NULL(n)) {
1106 bch_btree_sort_into(b, n, &b->c->sort);
1107 bkey_copy_key(&n->key, &b->key);
1108 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001109
1110 return n;
1111}
1112
Kent Overstreet8835c122013-07-24 23:18:05 -07001113static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1114{
1115 unsigned i;
1116
1117 bkey_copy(k, &b->key);
1118 bkey_copy_key(k, &ZERO_KEY);
1119
1120 for (i = 0; i < KEY_PTRS(k); i++) {
1121 uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
1122
1123 SET_PTR_GEN(k, i, g);
1124 }
1125
1126 atomic_inc(&b->c->prio_blocked);
1127}
1128
Kent Overstreet78365412013-12-17 01:29:34 -08001129static int btree_check_reserve(struct btree *b, struct btree_op *op)
1130{
1131 struct cache_set *c = b->c;
1132 struct cache *ca;
1133 unsigned i, reserve = c->root->level * 2 + 1;
1134 int ret = 0;
1135
1136 mutex_lock(&c->bucket_lock);
1137
1138 for_each_cache(ca, c, i)
1139 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1140 if (op)
1141 prepare_to_wait(&c->bucket_wait, &op->wait,
1142 TASK_UNINTERRUPTIBLE);
1143 ret = -EINTR;
1144 break;
1145 }
1146
1147 mutex_unlock(&c->bucket_lock);
1148 return ret;
1149}
1150
Kent Overstreetcafe5632013-03-23 16:11:31 -07001151/* Garbage collection */
1152
1153uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
1154{
1155 uint8_t stale = 0;
1156 unsigned i;
1157 struct bucket *g;
1158
1159 /*
1160 * ptr_invalid() can't return true for the keys that mark btree nodes as
1161 * freed, but since ptr_bad() returns true we'll never actually use them
1162 * for anything and thus we don't want mark their pointers here
1163 */
1164 if (!bkey_cmp(k, &ZERO_KEY))
1165 return stale;
1166
1167 for (i = 0; i < KEY_PTRS(k); i++) {
1168 if (!ptr_available(c, k, i))
1169 continue;
1170
1171 g = PTR_BUCKET(c, k, i);
1172
1173 if (gen_after(g->gc_gen, PTR_GEN(k, i)))
1174 g->gc_gen = PTR_GEN(k, i);
1175
1176 if (ptr_stale(c, k, i)) {
1177 stale = max(stale, ptr_stale(c, k, i));
1178 continue;
1179 }
1180
1181 cache_bug_on(GC_MARK(g) &&
1182 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1183 c, "inconsistent ptrs: mark = %llu, level = %i",
1184 GC_MARK(g), level);
1185
1186 if (level)
1187 SET_GC_MARK(g, GC_MARK_METADATA);
1188 else if (KEY_DIRTY(k))
1189 SET_GC_MARK(g, GC_MARK_DIRTY);
1190
1191 /* guard against overflow */
1192 SET_GC_SECTORS_USED(g, min_t(unsigned,
1193 GC_SECTORS_USED(g) + KEY_SIZE(k),
1194 (1 << 14) - 1));
1195
1196 BUG_ON(!GC_SECTORS_USED(g));
1197 }
1198
1199 return stale;
1200}
1201
1202#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1203
Kent Overstreeta1f03582013-09-10 19:07:00 -07001204static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001205{
1206 uint8_t stale = 0;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001207 unsigned keys = 0, good_keys = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001208 struct bkey *k;
1209 struct btree_iter iter;
1210 struct bset_tree *t;
1211
1212 gc->nodes++;
1213
1214 for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001215 stale = max(stale, btree_mark_key(b, k));
Kent Overstreeta1f03582013-09-10 19:07:00 -07001216 keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001217
1218 if (bch_ptr_bad(b, k))
1219 continue;
1220
Kent Overstreetcafe5632013-03-23 16:11:31 -07001221 gc->key_bytes += bkey_u64s(k);
1222 gc->nkeys++;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001223 good_keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001224
1225 gc->data += KEY_SIZE(k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001226 }
1227
1228 for (t = b->sets; t <= &b->sets[b->nsets]; t++)
1229 btree_bug_on(t->size &&
1230 bset_written(b, t) &&
1231 bkey_cmp(&b->key, &t->end) < 0,
1232 b, "found short btree key in gc");
1233
Kent Overstreeta1f03582013-09-10 19:07:00 -07001234 if (b->c->gc_always_rewrite)
1235 return true;
1236
1237 if (stale > 10)
1238 return true;
1239
1240 if ((keys - good_keys) * 2 > keys)
1241 return true;
1242
1243 return false;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001244}
1245
Kent Overstreeta1f03582013-09-10 19:07:00 -07001246#define GC_MERGE_NODES 4U
Kent Overstreetcafe5632013-03-23 16:11:31 -07001247
1248struct gc_merge_info {
1249 struct btree *b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001250 unsigned keys;
1251};
1252
Kent Overstreeta1f03582013-09-10 19:07:00 -07001253static int bch_btree_insert_node(struct btree *, struct btree_op *,
1254 struct keylist *, atomic_t *, struct bkey *);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001255
Kent Overstreeta1f03582013-09-10 19:07:00 -07001256static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1257 struct keylist *keylist, struct gc_stat *gc,
1258 struct gc_merge_info *r)
1259{
1260 unsigned i, nodes = 0, keys = 0, blocks;
1261 struct btree *new_nodes[GC_MERGE_NODES];
1262 struct closure cl;
1263 struct bkey *k;
1264
1265 memset(new_nodes, 0, sizeof(new_nodes));
Kent Overstreetb54d6932013-07-24 18:04:18 -07001266 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001267
Kent Overstreeta1f03582013-09-10 19:07:00 -07001268 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001269 keys += r[nodes++].keys;
1270
1271 blocks = btree_default_blocks(b->c) * 2 / 3;
1272
1273 if (nodes < 2 ||
Kent Overstreetee811282013-12-17 23:49:49 -08001274 __set_blocks(b->sets[0].data, keys,
1275 block_bytes(b->c)) > blocks * (nodes - 1))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001276 return 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001277
Kent Overstreeta1f03582013-09-10 19:07:00 -07001278 for (i = 0; i < nodes; i++) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001279 new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001280 if (IS_ERR_OR_NULL(new_nodes[i]))
1281 goto out_nocoalesce;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001282 }
1283
1284 for (i = nodes - 1; i > 0; --i) {
Kent Overstreetee811282013-12-17 23:49:49 -08001285 struct bset *n1 = btree_bset_first(new_nodes[i]);
1286 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001287 struct bkey *k, *last = NULL;
1288
1289 keys = 0;
1290
Kent Overstreeta1f03582013-09-10 19:07:00 -07001291 if (i > 1) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001292 for (k = n2->start;
Kent Overstreetfafff812013-12-17 21:56:21 -08001293 k < bset_bkey_last(n2);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001294 k = bkey_next(k)) {
1295 if (__set_blocks(n1, n1->keys + keys +
Kent Overstreetee811282013-12-17 23:49:49 -08001296 bkey_u64s(k),
1297 block_bytes(b->c)) > blocks)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001298 break;
1299
1300 last = k;
1301 keys += bkey_u64s(k);
1302 }
Kent Overstreeta1f03582013-09-10 19:07:00 -07001303 } else {
1304 /*
1305 * Last node we're not getting rid of - we're getting
1306 * rid of the node at r[0]. Have to try and fit all of
1307 * the remaining keys into this node; we can't ensure
1308 * they will always fit due to rounding and variable
1309 * length keys (shouldn't be possible in practice,
1310 * though)
1311 */
1312 if (__set_blocks(n1, n1->keys + n2->keys,
Kent Overstreetee811282013-12-17 23:49:49 -08001313 block_bytes(b->c)) >
1314 btree_blocks(new_nodes[i]))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001315 goto out_nocoalesce;
1316
1317 keys = n2->keys;
1318 /* Take the key of the node we're getting rid of */
1319 last = &r->b->key;
1320 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001321
Kent Overstreetee811282013-12-17 23:49:49 -08001322 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1323 btree_blocks(new_nodes[i]));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001324
Kent Overstreeta1f03582013-09-10 19:07:00 -07001325 if (last)
1326 bkey_copy_key(&new_nodes[i]->key, last);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001327
Kent Overstreetfafff812013-12-17 21:56:21 -08001328 memcpy(bset_bkey_last(n1),
Kent Overstreetcafe5632013-03-23 16:11:31 -07001329 n2->start,
Kent Overstreetfafff812013-12-17 21:56:21 -08001330 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001331
1332 n1->keys += keys;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001333 r[i].keys = n1->keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001334
1335 memmove(n2->start,
Kent Overstreetfafff812013-12-17 21:56:21 -08001336 bset_bkey_idx(n2, keys),
1337 (void *) bset_bkey_last(n2) -
1338 (void *) bset_bkey_idx(n2, keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001339
1340 n2->keys -= keys;
1341
Kent Overstreet085d2a32013-11-11 18:20:51 -08001342 if (__bch_keylist_realloc(keylist,
1343 bkey_u64s(&new_nodes[i]->key)))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001344 goto out_nocoalesce;
1345
1346 bch_btree_node_write(new_nodes[i], &cl);
1347 bch_keylist_add(keylist, &new_nodes[i]->key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001348 }
1349
Kent Overstreeta1f03582013-09-10 19:07:00 -07001350 for (i = 0; i < nodes; i++) {
Kent Overstreet085d2a32013-11-11 18:20:51 -08001351 if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001352 goto out_nocoalesce;
1353
1354 make_btree_freeing_key(r[i].b, keylist->top);
1355 bch_keylist_push(keylist);
1356 }
1357
1358 /* We emptied out this node */
Kent Overstreetee811282013-12-17 23:49:49 -08001359 BUG_ON(btree_bset_first(new_nodes[0])->keys);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001360 btree_node_free(new_nodes[0]);
1361 rw_unlock(true, new_nodes[0]);
1362
1363 closure_sync(&cl);
1364
1365 for (i = 0; i < nodes; i++) {
1366 btree_node_free(r[i].b);
1367 rw_unlock(true, r[i].b);
1368
1369 r[i].b = new_nodes[i];
1370 }
1371
1372 bch_btree_insert_node(b, op, keylist, NULL, NULL);
1373 BUG_ON(!bch_keylist_empty(keylist));
1374
1375 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1376 r[nodes - 1].b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001377
Kent Overstreetc37511b2013-04-26 15:39:55 -07001378 trace_bcache_btree_gc_coalesce(nodes);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001379 gc->nodes--;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001380
Kent Overstreeta1f03582013-09-10 19:07:00 -07001381 /* Invalidated our iterator */
1382 return -EINTR;
1383
1384out_nocoalesce:
1385 closure_sync(&cl);
1386
1387 while ((k = bch_keylist_pop(keylist)))
1388 if (!bkey_cmp(k, &ZERO_KEY))
1389 atomic_dec(&b->c->prio_blocked);
1390
1391 for (i = 0; i < nodes; i++)
1392 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1393 btree_node_free(new_nodes[i]);
1394 rw_unlock(true, new_nodes[i]);
1395 }
1396 return 0;
1397}
1398
1399static unsigned btree_gc_count_keys(struct btree *b)
1400{
1401 struct bkey *k;
1402 struct btree_iter iter;
1403 unsigned ret = 0;
1404
1405 for_each_key_filter(b, k, &iter, bch_ptr_bad)
1406 ret += bkey_u64s(k);
1407
1408 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001409}
1410
1411static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1412 struct closure *writes, struct gc_stat *gc)
1413{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001414 unsigned i;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001415 int ret = 0;
1416 bool should_rewrite;
1417 struct btree *n;
1418 struct bkey *k;
1419 struct keylist keys;
1420 struct btree_iter iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001421 struct gc_merge_info r[GC_MERGE_NODES];
Kent Overstreeta1f03582013-09-10 19:07:00 -07001422 struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001423
Kent Overstreeta1f03582013-09-10 19:07:00 -07001424 bch_keylist_init(&keys);
1425 bch_btree_iter_init(b, &iter, &b->c->gc_done);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001426
Kent Overstreeta1f03582013-09-10 19:07:00 -07001427 for (i = 0; i < GC_MERGE_NODES; i++)
1428 r[i].b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001429
Kent Overstreeta1f03582013-09-10 19:07:00 -07001430 while (1) {
1431 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
1432 if (k) {
1433 r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
1434 if (IS_ERR(r->b)) {
1435 ret = PTR_ERR(r->b);
1436 break;
1437 }
1438
1439 r->keys = btree_gc_count_keys(r->b);
1440
1441 ret = btree_gc_coalesce(b, op, &keys, gc, r);
1442 if (ret)
1443 break;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001444 }
1445
Kent Overstreeta1f03582013-09-10 19:07:00 -07001446 if (!last->b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001447 break;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001448
1449 if (!IS_ERR(last->b)) {
1450 should_rewrite = btree_gc_mark_node(last->b, gc);
Kent Overstreet78365412013-12-17 01:29:34 -08001451 if (should_rewrite &&
1452 !btree_check_reserve(b, NULL)) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001453 n = btree_node_alloc_replacement(last->b,
1454 false);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001455
1456 if (!IS_ERR_OR_NULL(n)) {
1457 bch_btree_node_write_sync(n);
1458 bch_keylist_add(&keys, &n->key);
1459
1460 make_btree_freeing_key(last->b,
1461 keys.top);
1462 bch_keylist_push(&keys);
1463
1464 btree_node_free(last->b);
1465
1466 bch_btree_insert_node(b, op, &keys,
1467 NULL, NULL);
1468 BUG_ON(!bch_keylist_empty(&keys));
1469
1470 rw_unlock(true, last->b);
1471 last->b = n;
1472
1473 /* Invalidated our iterator */
1474 ret = -EINTR;
1475 break;
1476 }
1477 }
1478
1479 if (last->b->level) {
1480 ret = btree_gc_recurse(last->b, op, writes, gc);
1481 if (ret)
1482 break;
1483 }
1484
1485 bkey_copy_key(&b->c->gc_done, &last->b->key);
1486
1487 /*
1488 * Must flush leaf nodes before gc ends, since replace
1489 * operations aren't journalled
1490 */
1491 if (btree_node_dirty(last->b))
1492 bch_btree_node_write(last->b, writes);
1493 rw_unlock(true, last->b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001494 }
1495
Kent Overstreeta1f03582013-09-10 19:07:00 -07001496 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1497 r->b = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001498
Kent Overstreetcafe5632013-03-23 16:11:31 -07001499 if (need_resched()) {
1500 ret = -EAGAIN;
1501 break;
1502 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001503 }
1504
Kent Overstreeta1f03582013-09-10 19:07:00 -07001505 for (i = 0; i < GC_MERGE_NODES; i++)
1506 if (!IS_ERR_OR_NULL(r[i].b)) {
1507 if (btree_node_dirty(r[i].b))
1508 bch_btree_node_write(r[i].b, writes);
1509 rw_unlock(true, r[i].b);
1510 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001511
Kent Overstreeta1f03582013-09-10 19:07:00 -07001512 bch_keylist_free(&keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001513
1514 return ret;
1515}
1516
1517static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1518 struct closure *writes, struct gc_stat *gc)
1519{
1520 struct btree *n = NULL;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001521 int ret = 0;
1522 bool should_rewrite;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001523
Kent Overstreeta1f03582013-09-10 19:07:00 -07001524 should_rewrite = btree_gc_mark_node(b, gc);
1525 if (should_rewrite) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07001526 n = btree_node_alloc_replacement(b, false);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001527
Kent Overstreeta1f03582013-09-10 19:07:00 -07001528 if (!IS_ERR_OR_NULL(n)) {
1529 bch_btree_node_write_sync(n);
1530 bch_btree_set_root(n);
1531 btree_node_free(b);
1532 rw_unlock(true, n);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001533
Kent Overstreeta1f03582013-09-10 19:07:00 -07001534 return -EINTR;
1535 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001536 }
1537
Kent Overstreeta1f03582013-09-10 19:07:00 -07001538 if (b->level) {
1539 ret = btree_gc_recurse(b, op, writes, gc);
1540 if (ret)
1541 return ret;
1542 }
1543
1544 bkey_copy_key(&b->c->gc_done, &b->key);
1545
Kent Overstreetcafe5632013-03-23 16:11:31 -07001546 return ret;
1547}
1548
1549static void btree_gc_start(struct cache_set *c)
1550{
1551 struct cache *ca;
1552 struct bucket *b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001553 unsigned i;
1554
1555 if (!c->gc_mark_valid)
1556 return;
1557
1558 mutex_lock(&c->bucket_lock);
1559
1560 c->gc_mark_valid = 0;
1561 c->gc_done = ZERO_KEY;
1562
1563 for_each_cache(ca, c, i)
1564 for_each_bucket(b, ca) {
1565 b->gc_gen = b->gen;
Kent Overstreet29ebf462013-07-11 19:43:21 -07001566 if (!atomic_read(&b->pin)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001567 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
Kent Overstreet29ebf462013-07-11 19:43:21 -07001568 SET_GC_SECTORS_USED(b, 0);
1569 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001570 }
1571
Kent Overstreetcafe5632013-03-23 16:11:31 -07001572 mutex_unlock(&c->bucket_lock);
1573}
1574
1575size_t bch_btree_gc_finish(struct cache_set *c)
1576{
1577 size_t available = 0;
1578 struct bucket *b;
1579 struct cache *ca;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001580 unsigned i;
1581
1582 mutex_lock(&c->bucket_lock);
1583
1584 set_gc_sectors(c);
1585 c->gc_mark_valid = 1;
1586 c->need_gc = 0;
1587
1588 if (c->root)
1589 for (i = 0; i < KEY_PTRS(&c->root->key); i++)
1590 SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
1591 GC_MARK_METADATA);
1592
1593 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1594 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1595 GC_MARK_METADATA);
1596
Nicholas Swensonbf0a6282013-11-26 19:14:23 -08001597 /* don't reclaim buckets to which writeback keys point */
1598 rcu_read_lock();
1599 for (i = 0; i < c->nr_uuids; i++) {
1600 struct bcache_device *d = c->devices[i];
1601 struct cached_dev *dc;
1602 struct keybuf_key *w, *n;
1603 unsigned j;
1604
1605 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1606 continue;
1607 dc = container_of(d, struct cached_dev, disk);
1608
1609 spin_lock(&dc->writeback_keys.lock);
1610 rbtree_postorder_for_each_entry_safe(w, n,
1611 &dc->writeback_keys.keys, node)
1612 for (j = 0; j < KEY_PTRS(&w->key); j++)
1613 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1614 GC_MARK_DIRTY);
1615 spin_unlock(&dc->writeback_keys.lock);
1616 }
1617 rcu_read_unlock();
1618
Kent Overstreetcafe5632013-03-23 16:11:31 -07001619 for_each_cache(ca, c, i) {
1620 uint64_t *i;
1621
1622 ca->invalidate_needs_gc = 0;
1623
1624 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1625 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1626
1627 for (i = ca->prio_buckets;
1628 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1629 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1630
1631 for_each_bucket(b, ca) {
1632 b->last_gc = b->gc_gen;
1633 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1634
1635 if (!atomic_read(&b->pin) &&
1636 GC_MARK(b) == GC_MARK_RECLAIMABLE) {
1637 available++;
1638 if (!GC_SECTORS_USED(b))
1639 bch_bucket_add_unused(ca, b);
1640 }
1641 }
1642 }
1643
Kent Overstreetcafe5632013-03-23 16:11:31 -07001644 mutex_unlock(&c->bucket_lock);
1645 return available;
1646}
1647
Kent Overstreet72a44512013-10-24 17:19:26 -07001648static void bch_btree_gc(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001649{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001650 int ret;
1651 unsigned long available;
1652 struct gc_stat stats;
1653 struct closure writes;
1654 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001655 uint64_t start_time = local_clock();
Kent Overstreet57943512013-04-25 13:58:35 -07001656
Kent Overstreetc37511b2013-04-26 15:39:55 -07001657 trace_bcache_gc_start(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001658
1659 memset(&stats, 0, sizeof(struct gc_stat));
1660 closure_init_stack(&writes);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001661 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001662
1663 btree_gc_start(c);
1664
Kent Overstreeta1f03582013-09-10 19:07:00 -07001665 do {
1666 ret = btree_root(gc_root, c, &op, &writes, &stats);
1667 closure_sync(&writes);
Kent Overstreet57943512013-04-25 13:58:35 -07001668
Kent Overstreeta1f03582013-09-10 19:07:00 -07001669 if (ret && ret != -EAGAIN)
1670 pr_warn("gc failed!");
1671 } while (ret);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001672
1673 available = bch_btree_gc_finish(c);
Kent Overstreet57943512013-04-25 13:58:35 -07001674 wake_up_allocators(c);
1675
Kent Overstreet169ef1c2013-03-28 12:50:55 -06001676 bch_time_stats_update(&c->btree_gc_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001677
1678 stats.key_bytes *= sizeof(uint64_t);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001679 stats.data <<= 9;
1680 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1681 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001682
Kent Overstreetc37511b2013-04-26 15:39:55 -07001683 trace_bcache_gc_end(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001684
Kent Overstreet72a44512013-10-24 17:19:26 -07001685 bch_moving_gc(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001686}
1687
Kent Overstreet72a44512013-10-24 17:19:26 -07001688static int bch_gc_thread(void *arg)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001689{
Kent Overstreet72a44512013-10-24 17:19:26 -07001690 struct cache_set *c = arg;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001691 struct cache *ca;
1692 unsigned i;
Kent Overstreet72a44512013-10-24 17:19:26 -07001693
1694 while (1) {
Kent Overstreeta1f03582013-09-10 19:07:00 -07001695again:
Kent Overstreet72a44512013-10-24 17:19:26 -07001696 bch_btree_gc(c);
1697
1698 set_current_state(TASK_INTERRUPTIBLE);
1699 if (kthread_should_stop())
1700 break;
1701
Kent Overstreeta1f03582013-09-10 19:07:00 -07001702 mutex_lock(&c->bucket_lock);
1703
1704 for_each_cache(ca, c, i)
1705 if (ca->invalidate_needs_gc) {
1706 mutex_unlock(&c->bucket_lock);
1707 set_current_state(TASK_RUNNING);
1708 goto again;
1709 }
1710
1711 mutex_unlock(&c->bucket_lock);
1712
Kent Overstreet72a44512013-10-24 17:19:26 -07001713 try_to_freeze();
1714 schedule();
1715 }
1716
1717 return 0;
1718}
1719
1720int bch_gc_thread_start(struct cache_set *c)
1721{
1722 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1723 if (IS_ERR(c->gc_thread))
1724 return PTR_ERR(c->gc_thread);
1725
1726 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1727 return 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001728}
1729
1730/* Initial partial gc */
1731
1732static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
1733 unsigned long **seen)
1734{
Kent Overstreet50310162013-09-10 17:18:59 -07001735 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001736 unsigned i;
Kent Overstreet50310162013-09-10 17:18:59 -07001737 struct bkey *k, *p = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001738 struct bucket *g;
1739 struct btree_iter iter;
1740
1741 for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
1742 for (i = 0; i < KEY_PTRS(k); i++) {
1743 if (!ptr_available(b->c, k, i))
1744 continue;
1745
1746 g = PTR_BUCKET(b->c, k, i);
1747
1748 if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
1749 seen[PTR_DEV(k, i)]) ||
1750 !ptr_stale(b->c, k, i)) {
1751 g->gen = PTR_GEN(k, i);
1752
1753 if (b->level)
1754 g->prio = BTREE_PRIO;
1755 else if (g->prio == BTREE_PRIO)
1756 g->prio = INITIAL_PRIO;
1757 }
1758 }
1759
1760 btree_mark_key(b, k);
1761 }
1762
1763 if (b->level) {
Kent Overstreet50310162013-09-10 17:18:59 -07001764 bch_btree_iter_init(b, &iter, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001765
Kent Overstreet50310162013-09-10 17:18:59 -07001766 do {
1767 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
1768 if (k)
1769 btree_node_prefetch(b->c, k, b->level - 1);
1770
Kent Overstreetcafe5632013-03-23 16:11:31 -07001771 if (p)
Kent Overstreet50310162013-09-10 17:18:59 -07001772 ret = btree(check_recurse, p, b, op, seen);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001773
Kent Overstreet50310162013-09-10 17:18:59 -07001774 p = k;
1775 } while (p && !ret);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001776 }
1777
1778 return 0;
1779}
1780
Kent Overstreetc18536a2013-07-24 17:44:17 -07001781int bch_btree_check(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001782{
1783 int ret = -ENOMEM;
1784 unsigned i;
1785 unsigned long *seen[MAX_CACHES_PER_SET];
Kent Overstreetc18536a2013-07-24 17:44:17 -07001786 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001787
1788 memset(seen, 0, sizeof(seen));
Kent Overstreetb54d6932013-07-24 18:04:18 -07001789 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001790
1791 for (i = 0; c->cache[i]; i++) {
1792 size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
1793 seen[i] = kmalloc(n, GFP_KERNEL);
1794 if (!seen[i])
1795 goto err;
1796
1797 /* Disables the seen array until prio_read() uses it too */
1798 memset(seen[i], 0xFF, n);
1799 }
1800
Kent Overstreetc18536a2013-07-24 17:44:17 -07001801 ret = btree_root(check_recurse, c, &op, seen);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001802err:
1803 for (i = 0; i < MAX_CACHES_PER_SET; i++)
1804 kfree(seen[i]);
1805 return ret;
1806}
1807
1808/* Btree insertion */
1809
Kent Overstreet1b207d82013-09-10 18:52:54 -07001810static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
Kent Overstreetcafe5632013-03-23 16:11:31 -07001811 struct btree_iter *iter,
Kent Overstreet1b207d82013-09-10 18:52:54 -07001812 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001813{
Kent Overstreet279afba2013-06-05 06:21:07 -07001814 void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001815 {
Kent Overstreet279afba2013-06-05 06:21:07 -07001816 if (KEY_DIRTY(k))
1817 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1818 offset, -sectors);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001819 }
1820
Kent Overstreet279afba2013-06-05 06:21:07 -07001821 uint64_t old_offset;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001822 unsigned old_size, sectors_found = 0;
1823
1824 while (1) {
1825 struct bkey *k = bch_btree_iter_next(iter);
Kent Overstreet911c9612013-07-28 18:35:09 -07001826 if (!k)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001827 break;
1828
Kent Overstreet911c9612013-07-28 18:35:09 -07001829 if (bkey_cmp(&START_KEY(k), insert) >= 0) {
1830 if (KEY_SIZE(k))
1831 break;
1832 else
1833 continue;
1834 }
1835
Kent Overstreetcafe5632013-03-23 16:11:31 -07001836 if (bkey_cmp(k, &START_KEY(insert)) <= 0)
1837 continue;
1838
Kent Overstreet279afba2013-06-05 06:21:07 -07001839 old_offset = KEY_START(k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001840 old_size = KEY_SIZE(k);
1841
1842 /*
1843 * We might overlap with 0 size extents; we can't skip these
1844 * because if they're in the set we're inserting to we have to
1845 * adjust them so they don't overlap with the key we're
Kent Overstreet1b207d82013-09-10 18:52:54 -07001846 * inserting. But we don't want to check them for replace
Kent Overstreetcafe5632013-03-23 16:11:31 -07001847 * operations.
1848 */
1849
Kent Overstreet1b207d82013-09-10 18:52:54 -07001850 if (replace_key && KEY_SIZE(k)) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001851 /*
1852 * k might have been split since we inserted/found the
1853 * key we're replacing
1854 */
1855 unsigned i;
1856 uint64_t offset = KEY_START(k) -
Kent Overstreet1b207d82013-09-10 18:52:54 -07001857 KEY_START(replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001858
1859 /* But it must be a subset of the replace key */
Kent Overstreet1b207d82013-09-10 18:52:54 -07001860 if (KEY_START(k) < KEY_START(replace_key) ||
1861 KEY_OFFSET(k) > KEY_OFFSET(replace_key))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001862 goto check_failed;
1863
1864 /* We didn't find a key that we were supposed to */
1865 if (KEY_START(k) > KEY_START(insert) + sectors_found)
1866 goto check_failed;
1867
Kent Overstreetd24a6e12013-11-10 21:55:27 -08001868 if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
1869 KEY_DIRTY(k) != KEY_DIRTY(replace_key))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001870 goto check_failed;
1871
1872 /* skip past gen */
1873 offset <<= 8;
1874
Kent Overstreet1b207d82013-09-10 18:52:54 -07001875 BUG_ON(!KEY_PTRS(replace_key));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001876
Kent Overstreet1b207d82013-09-10 18:52:54 -07001877 for (i = 0; i < KEY_PTRS(replace_key); i++)
1878 if (k->ptr[i] != replace_key->ptr[i] + offset)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001879 goto check_failed;
1880
1881 sectors_found = KEY_OFFSET(k) - KEY_START(insert);
1882 }
1883
1884 if (bkey_cmp(insert, k) < 0 &&
1885 bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
1886 /*
1887 * We overlapped in the middle of an existing key: that
1888 * means we have to split the old key. But we have to do
1889 * slightly different things depending on whether the
1890 * old key has been written out yet.
1891 */
1892
1893 struct bkey *top;
1894
Kent Overstreet279afba2013-06-05 06:21:07 -07001895 subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001896
1897 if (bkey_written(b, k)) {
1898 /*
1899 * We insert a new key to cover the top of the
1900 * old key, and the old key is modified in place
1901 * to represent the bottom split.
1902 *
1903 * It's completely arbitrary whether the new key
1904 * is the top or the bottom, but it has to match
1905 * up with what btree_sort_fixup() does - it
1906 * doesn't check for this kind of overlap, it
1907 * depends on us inserting a new key for the top
1908 * here.
1909 */
Kent Overstreetee811282013-12-17 23:49:49 -08001910 top = bch_bset_search(b, bset_tree_last(b),
Kent Overstreetcafe5632013-03-23 16:11:31 -07001911 insert);
Kent Overstreetee811282013-12-17 23:49:49 -08001912 bch_bset_insert(b, top, k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001913 } else {
1914 BKEY_PADDED(key) temp;
1915 bkey_copy(&temp.key, k);
Kent Overstreetee811282013-12-17 23:49:49 -08001916 bch_bset_insert(b, k, &temp.key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001917 top = bkey_next(k);
1918 }
1919
1920 bch_cut_front(insert, top);
1921 bch_cut_back(&START_KEY(insert), k);
1922 bch_bset_fix_invalidated_key(b, k);
1923 return false;
1924 }
1925
1926 if (bkey_cmp(insert, k) < 0) {
1927 bch_cut_front(insert, k);
1928 } else {
Kent Overstreet1fa84552013-11-10 21:55:27 -08001929 if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
1930 old_offset = KEY_START(insert);
1931
Kent Overstreetcafe5632013-03-23 16:11:31 -07001932 if (bkey_written(b, k) &&
1933 bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
1934 /*
1935 * Completely overwrote, so we don't have to
1936 * invalidate the binary search tree
1937 */
1938 bch_cut_front(k, k);
1939 } else {
1940 __bch_cut_back(&START_KEY(insert), k);
1941 bch_bset_fix_invalidated_key(b, k);
1942 }
1943 }
1944
Kent Overstreet279afba2013-06-05 06:21:07 -07001945 subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001946 }
1947
1948check_failed:
Kent Overstreet1b207d82013-09-10 18:52:54 -07001949 if (replace_key) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001950 if (!sectors_found) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001951 return true;
1952 } else if (sectors_found < KEY_SIZE(insert)) {
1953 SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
1954 (KEY_SIZE(insert) - sectors_found));
1955 SET_KEY_SIZE(insert, sectors_found);
1956 }
1957 }
1958
1959 return false;
1960}
1961
1962static bool btree_insert_key(struct btree *b, struct btree_op *op,
Kent Overstreet1b207d82013-09-10 18:52:54 -07001963 struct bkey *k, struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001964{
Kent Overstreetee811282013-12-17 23:49:49 -08001965 struct bset *i = btree_bset_last(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001966 struct bkey *m, *prev;
Kent Overstreet85b14922013-05-14 20:33:16 -07001967 unsigned status = BTREE_INSERT_STATUS_INSERT;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001968
1969 BUG_ON(bkey_cmp(k, &b->key) > 0);
1970 BUG_ON(b->level && !KEY_PTRS(k));
1971 BUG_ON(!b->level && !KEY_OFFSET(k));
1972
1973 if (!b->level) {
1974 struct btree_iter iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001975
1976 /*
1977 * bset_search() returns the first key that is strictly greater
1978 * than the search key - but for back merging, we want to find
Kent Overstreet0eacac22013-07-01 19:29:05 -07001979 * the previous key.
Kent Overstreetcafe5632013-03-23 16:11:31 -07001980 */
Kent Overstreetcafe5632013-03-23 16:11:31 -07001981 prev = NULL;
Kent Overstreet0eacac22013-07-01 19:29:05 -07001982 m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001983
Kent Overstreet1b207d82013-09-10 18:52:54 -07001984 if (fix_overlapping_extents(b, k, &iter, replace_key)) {
1985 op->insert_collision = true;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001986 return false;
Kent Overstreet1b207d82013-09-10 18:52:54 -07001987 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001988
Kent Overstreet1fa84552013-11-10 21:55:27 -08001989 if (KEY_DIRTY(k))
1990 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1991 KEY_START(k), KEY_SIZE(k));
1992
Kent Overstreetfafff812013-12-17 21:56:21 -08001993 while (m != bset_bkey_last(i) &&
Kent Overstreetcafe5632013-03-23 16:11:31 -07001994 bkey_cmp(k, &START_KEY(m)) > 0)
1995 prev = m, m = bkey_next(m);
1996
1997 if (key_merging_disabled(b->c))
1998 goto insert;
1999
2000 /* prev is in the tree, if we merge we're done */
Kent Overstreet85b14922013-05-14 20:33:16 -07002001 status = BTREE_INSERT_STATUS_BACK_MERGE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002002 if (prev &&
2003 bch_bkey_try_merge(b, prev, k))
2004 goto merged;
2005
Kent Overstreet85b14922013-05-14 20:33:16 -07002006 status = BTREE_INSERT_STATUS_OVERWROTE;
Kent Overstreetfafff812013-12-17 21:56:21 -08002007 if (m != bset_bkey_last(i) &&
Kent Overstreetcafe5632013-03-23 16:11:31 -07002008 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
2009 goto copy;
2010
Kent Overstreet85b14922013-05-14 20:33:16 -07002011 status = BTREE_INSERT_STATUS_FRONT_MERGE;
Kent Overstreetfafff812013-12-17 21:56:21 -08002012 if (m != bset_bkey_last(i) &&
Kent Overstreetcafe5632013-03-23 16:11:31 -07002013 bch_bkey_try_merge(b, k, m))
2014 goto copy;
Kent Overstreet1b207d82013-09-10 18:52:54 -07002015 } else {
2016 BUG_ON(replace_key);
Kent Overstreetee811282013-12-17 23:49:49 -08002017 m = bch_bset_search(b, bset_tree_last(b), k);
Kent Overstreet1b207d82013-09-10 18:52:54 -07002018 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002019
Kent Overstreetee811282013-12-17 23:49:49 -08002020insert: bch_bset_insert(b, m, k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002021copy: bkey_copy(m, k);
2022merged:
Kent Overstreet1b207d82013-09-10 18:52:54 -07002023 bch_check_keys(b, "%u for %s", status,
2024 replace_key ? "replace" : "insert");
Kent Overstreetcafe5632013-03-23 16:11:31 -07002025
2026 if (b->level && !KEY_OFFSET(k))
Kent Overstreet57943512013-04-25 13:58:35 -07002027 btree_current_write(b)->prio_blocked++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002028
Kent Overstreet1b207d82013-09-10 18:52:54 -07002029 trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002030
2031 return true;
2032}
2033
Kent Overstreet26c949f2013-09-10 18:41:15 -07002034static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002035 struct keylist *insert_keys,
2036 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002037{
2038 bool ret = false;
Kent Overstreet280481d2013-10-24 16:36:03 -07002039 int oldsize = bch_count_data(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002040
Kent Overstreet26c949f2013-09-10 18:41:15 -07002041 while (!bch_keylist_empty(insert_keys)) {
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002042 struct bset *i = write_block(b);
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002043 struct bkey *k = insert_keys->keys;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002044
Kent Overstreetee811282013-12-17 23:49:49 -08002045 if (b->written +
2046 __set_blocks(i, i->keys + bkey_u64s(k),
2047 block_bytes(b->c)) > btree_blocks(b))
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002048 break;
2049
2050 if (bkey_cmp(k, &b->key) <= 0) {
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002051 if (!b->level)
2052 bkey_put(b->c, k);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002053
Kent Overstreet1b207d82013-09-10 18:52:54 -07002054 ret |= btree_insert_key(b, op, k, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002055 bch_keylist_pop_front(insert_keys);
2056 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
Kent Overstreet26c949f2013-09-10 18:41:15 -07002057 BKEY_PADDED(key) temp;
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002058 bkey_copy(&temp.key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002059
2060 bch_cut_back(&b->key, &temp.key);
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002061 bch_cut_front(&b->key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002062
Kent Overstreet1b207d82013-09-10 18:52:54 -07002063 ret |= btree_insert_key(b, op, &temp.key, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002064 break;
2065 } else {
2066 break;
2067 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002068 }
2069
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002070 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2071
Kent Overstreetcafe5632013-03-23 16:11:31 -07002072 BUG_ON(bch_count_data(b) < oldsize);
2073 return ret;
2074}
2075
Kent Overstreet26c949f2013-09-10 18:41:15 -07002076static int btree_split(struct btree *b, struct btree_op *op,
2077 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002078 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002079{
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002080 bool split;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002081 struct btree *n1, *n2 = NULL, *n3 = NULL;
2082 uint64_t start_time = local_clock();
Kent Overstreetb54d6932013-07-24 18:04:18 -07002083 struct closure cl;
Kent Overstreet17e21a92013-07-26 12:32:38 -07002084 struct keylist parent_keys;
Kent Overstreetb54d6932013-07-24 18:04:18 -07002085
2086 closure_init_stack(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002087 bch_keylist_init(&parent_keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002088
Kent Overstreet78365412013-12-17 01:29:34 -08002089 if (!b->level &&
2090 btree_check_reserve(b, op))
2091 return -EINTR;
2092
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002093 n1 = btree_node_alloc_replacement(b, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002094 if (IS_ERR(n1))
2095 goto err;
2096
Kent Overstreetee811282013-12-17 23:49:49 -08002097 split = set_blocks(btree_bset_first(n1),
2098 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002099
Kent Overstreetcafe5632013-03-23 16:11:31 -07002100 if (split) {
2101 unsigned keys = 0;
2102
Kent Overstreetee811282013-12-17 23:49:49 -08002103 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002104
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002105 n2 = bch_btree_node_alloc(b->c, b->level, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002106 if (IS_ERR(n2))
2107 goto err_free1;
2108
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002109 if (!b->parent) {
Kent Overstreetbc9389e2013-09-10 19:07:35 -07002110 n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002111 if (IS_ERR(n3))
2112 goto err_free2;
2113 }
2114
Kent Overstreet1b207d82013-09-10 18:52:54 -07002115 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002116
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002117 /*
2118 * Has to be a linear search because we don't have an auxiliary
Kent Overstreetcafe5632013-03-23 16:11:31 -07002119 * search tree yet
2120 */
2121
Kent Overstreetee811282013-12-17 23:49:49 -08002122 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2123 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
Kent Overstreetfafff812013-12-17 21:56:21 -08002124 keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002125
Kent Overstreetfafff812013-12-17 21:56:21 -08002126 bkey_copy_key(&n1->key,
Kent Overstreetee811282013-12-17 23:49:49 -08002127 bset_bkey_idx(btree_bset_first(n1), keys));
2128 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002129
Kent Overstreetee811282013-12-17 23:49:49 -08002130 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2131 btree_bset_first(n1)->keys = keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002132
Kent Overstreetee811282013-12-17 23:49:49 -08002133 memcpy(btree_bset_first(n2)->start,
2134 bset_bkey_last(btree_bset_first(n1)),
2135 btree_bset_first(n2)->keys * sizeof(uint64_t));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002136
2137 bkey_copy_key(&n2->key, &b->key);
2138
Kent Overstreet17e21a92013-07-26 12:32:38 -07002139 bch_keylist_add(&parent_keys, &n2->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002140 bch_btree_node_write(n2, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002141 rw_unlock(true, n2);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002142 } else {
Kent Overstreetee811282013-12-17 23:49:49 -08002143 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002144
Kent Overstreet1b207d82013-09-10 18:52:54 -07002145 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002146 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002147
Kent Overstreet17e21a92013-07-26 12:32:38 -07002148 bch_keylist_add(&parent_keys, &n1->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002149 bch_btree_node_write(n1, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002150
2151 if (n3) {
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002152 /* Depth increases, make a new root */
Kent Overstreetcafe5632013-03-23 16:11:31 -07002153 bkey_copy_key(&n3->key, &MAX_KEY);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002154 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002155 bch_btree_node_write(n3, &cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002156
Kent Overstreetb54d6932013-07-24 18:04:18 -07002157 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002158 bch_btree_set_root(n3);
2159 rw_unlock(true, n3);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002160
2161 btree_node_free(b);
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002162 } else if (!b->parent) {
2163 /* Root filled up but didn't need to be split */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002164 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002165 bch_btree_set_root(n1);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002166
2167 btree_node_free(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002168 } else {
Kent Overstreet17e21a92013-07-26 12:32:38 -07002169 /* Split a non root node */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002170 closure_sync(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002171 make_btree_freeing_key(b, parent_keys.top);
2172 bch_keylist_push(&parent_keys);
2173
2174 btree_node_free(b);
2175
2176 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2177 BUG_ON(!bch_keylist_empty(&parent_keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002178 }
2179
2180 rw_unlock(true, n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002181
Kent Overstreet169ef1c2013-03-28 12:50:55 -06002182 bch_time_stats_update(&b->c->btree_split_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002183
2184 return 0;
2185err_free2:
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002186 bkey_put(b->c, &n2->key);
Kent Overstreete8e1d462013-07-24 17:27:07 -07002187 btree_node_free(n2);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002188 rw_unlock(true, n2);
2189err_free1:
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002190 bkey_put(b->c, &n1->key);
Kent Overstreete8e1d462013-07-24 17:27:07 -07002191 btree_node_free(n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002192 rw_unlock(true, n1);
2193err:
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002194 WARN(1, "bcache: btree split failed");
2195
Kent Overstreetcafe5632013-03-23 16:11:31 -07002196 if (n3 == ERR_PTR(-EAGAIN) ||
2197 n2 == ERR_PTR(-EAGAIN) ||
2198 n1 == ERR_PTR(-EAGAIN))
2199 return -EAGAIN;
2200
Kent Overstreetcafe5632013-03-23 16:11:31 -07002201 return -ENOMEM;
2202}
2203
Kent Overstreet26c949f2013-09-10 18:41:15 -07002204static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
Kent Overstreetc18536a2013-07-24 17:44:17 -07002205 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002206 atomic_t *journal_ref,
2207 struct bkey *replace_key)
Kent Overstreet26c949f2013-09-10 18:41:15 -07002208{
Kent Overstreet17e21a92013-07-26 12:32:38 -07002209 BUG_ON(b->level && replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002210
Kent Overstreet17e21a92013-07-26 12:32:38 -07002211 if (should_split(b)) {
2212 if (current->bio_list) {
2213 op->lock = b->c->root->level + 1;
2214 return -EAGAIN;
2215 } else if (op->lock <= b->c->root->level) {
2216 op->lock = b->c->root->level + 1;
2217 return -EINTR;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002218 } else {
Kent Overstreet17e21a92013-07-26 12:32:38 -07002219 /* Invalidated all iterators */
2220 return btree_split(b, op, insert_keys, replace_key) ?:
2221 -EINTR;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002222 }
Kent Overstreet17e21a92013-07-26 12:32:38 -07002223 } else {
Kent Overstreetee811282013-12-17 23:49:49 -08002224 BUG_ON(write_block(b) != btree_bset_last(b));
Kent Overstreet26c949f2013-09-10 18:41:15 -07002225
Kent Overstreet17e21a92013-07-26 12:32:38 -07002226 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2227 if (!b->level)
2228 bch_btree_leaf_dirty(b, journal_ref);
2229 else
2230 bch_btree_node_write_sync(b);
2231 }
2232
2233 return 0;
2234 }
Kent Overstreet26c949f2013-09-10 18:41:15 -07002235}
2236
Kent Overstreete7c590e2013-09-10 18:39:16 -07002237int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2238 struct bkey *check_key)
2239{
2240 int ret = -EINTR;
2241 uint64_t btree_ptr = b->key.ptr[0];
2242 unsigned long seq = b->seq;
2243 struct keylist insert;
2244 bool upgrade = op->lock == -1;
2245
2246 bch_keylist_init(&insert);
2247
2248 if (upgrade) {
2249 rw_unlock(false, b);
2250 rw_lock(true, b, b->level);
2251
2252 if (b->key.ptr[0] != btree_ptr ||
2253 b->seq != seq + 1)
2254 goto out;
2255 }
2256
2257 SET_KEY_PTRS(check_key, 1);
2258 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2259
2260 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2261
2262 bch_keylist_add(&insert, check_key);
2263
Kent Overstreet1b207d82013-09-10 18:52:54 -07002264 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
Kent Overstreete7c590e2013-09-10 18:39:16 -07002265
2266 BUG_ON(!ret && !bch_keylist_empty(&insert));
2267out:
2268 if (upgrade)
2269 downgrade_write(&b->lock);
2270 return ret;
2271}
2272
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002273struct btree_insert_op {
2274 struct btree_op op;
2275 struct keylist *keys;
2276 atomic_t *journal_ref;
2277 struct bkey *replace_key;
2278};
2279
Wei Yongjun08239ca2013-11-28 10:31:35 +08002280static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002281{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002282 struct btree_insert_op *op = container_of(b_op,
2283 struct btree_insert_op, op);
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002284
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002285 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2286 op->journal_ref, op->replace_key);
2287 if (ret && !bch_keylist_empty(op->keys))
2288 return ret;
2289 else
2290 return MAP_DONE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002291}
2292
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002293int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2294 atomic_t *journal_ref, struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002295{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002296 struct btree_insert_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002297 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002298
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002299 BUG_ON(current->bio_list);
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002300 BUG_ON(bch_keylist_empty(keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002301
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002302 bch_btree_op_init(&op.op, 0);
2303 op.keys = keys;
2304 op.journal_ref = journal_ref;
2305 op.replace_key = replace_key;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002306
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002307 while (!ret && !bch_keylist_empty(keys)) {
2308 op.op.lock = 0;
2309 ret = bch_btree_map_leaf_nodes(&op.op, c,
2310 &START_KEY(keys->keys),
2311 btree_insert_fn);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002312 }
2313
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002314 if (ret) {
2315 struct bkey *k;
2316
2317 pr_err("error %i", ret);
2318
2319 while ((k = bch_keylist_pop(keys)))
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002320 bkey_put(c, k);
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002321 } else if (op.op.insert_collision)
2322 ret = -ESRCH;
Kent Overstreet6054c6d2013-07-24 18:06:22 -07002323
Kent Overstreetcafe5632013-03-23 16:11:31 -07002324 return ret;
2325}
2326
2327void bch_btree_set_root(struct btree *b)
2328{
2329 unsigned i;
Kent Overstreete49c7c32013-06-26 17:25:38 -07002330 struct closure cl;
2331
2332 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002333
Kent Overstreetc37511b2013-04-26 15:39:55 -07002334 trace_bcache_btree_set_root(b);
2335
Kent Overstreetcafe5632013-03-23 16:11:31 -07002336 BUG_ON(!b->written);
2337
2338 for (i = 0; i < KEY_PTRS(&b->key); i++)
2339 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2340
2341 mutex_lock(&b->c->bucket_lock);
2342 list_del_init(&b->list);
2343 mutex_unlock(&b->c->bucket_lock);
2344
2345 b->c->root = b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002346
Kent Overstreete49c7c32013-06-26 17:25:38 -07002347 bch_journal_meta(b->c, &cl);
2348 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002349}
2350
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002351/* Map across nodes or keys */
2352
2353static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2354 struct bkey *from,
2355 btree_map_nodes_fn *fn, int flags)
2356{
2357 int ret = MAP_CONTINUE;
2358
2359 if (b->level) {
2360 struct bkey *k;
2361 struct btree_iter iter;
2362
2363 bch_btree_iter_init(b, &iter, from);
2364
2365 while ((k = bch_btree_iter_next_filter(&iter, b,
2366 bch_ptr_bad))) {
2367 ret = btree(map_nodes_recurse, k, b,
2368 op, from, fn, flags);
2369 from = NULL;
2370
2371 if (ret != MAP_CONTINUE)
2372 return ret;
2373 }
2374 }
2375
2376 if (!b->level || flags == MAP_ALL_NODES)
2377 ret = fn(op, b);
2378
2379 return ret;
2380}
2381
2382int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2383 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2384{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002385 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002386}
2387
2388static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2389 struct bkey *from, btree_map_keys_fn *fn,
2390 int flags)
2391{
2392 int ret = MAP_CONTINUE;
2393 struct bkey *k;
2394 struct btree_iter iter;
2395
2396 bch_btree_iter_init(b, &iter, from);
2397
2398 while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
2399 ret = !b->level
2400 ? fn(op, b, k)
2401 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2402 from = NULL;
2403
2404 if (ret != MAP_CONTINUE)
2405 return ret;
2406 }
2407
2408 if (!b->level && (flags & MAP_END_KEY))
2409 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2410 KEY_OFFSET(&b->key), 0));
2411
2412 return ret;
2413}
2414
2415int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2416 struct bkey *from, btree_map_keys_fn *fn, int flags)
2417{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002418 return btree_root(map_keys_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002419}
2420
Kent Overstreetcafe5632013-03-23 16:11:31 -07002421/* Keybuf code */
2422
2423static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2424{
2425 /* Overlapping keys compare equal */
2426 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2427 return -1;
2428 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2429 return 1;
2430 return 0;
2431}
2432
2433static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2434 struct keybuf_key *r)
2435{
2436 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2437}
2438
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002439struct refill {
2440 struct btree_op op;
Kent Overstreet48a915a2013-10-31 15:43:22 -07002441 unsigned nr_found;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002442 struct keybuf *buf;
2443 struct bkey *end;
2444 keybuf_pred_fn *pred;
2445};
2446
2447static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2448 struct bkey *k)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002449{
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002450 struct refill *refill = container_of(op, struct refill, op);
2451 struct keybuf *buf = refill->buf;
2452 int ret = MAP_CONTINUE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002453
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002454 if (bkey_cmp(k, refill->end) >= 0) {
2455 ret = MAP_DONE;
2456 goto out;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002457 }
2458
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002459 if (!KEY_SIZE(k)) /* end key */
2460 goto out;
2461
2462 if (refill->pred(buf, k)) {
2463 struct keybuf_key *w;
2464
2465 spin_lock(&buf->lock);
2466
2467 w = array_alloc(&buf->freelist);
2468 if (!w) {
2469 spin_unlock(&buf->lock);
2470 return MAP_DONE;
2471 }
2472
2473 w->private = NULL;
2474 bkey_copy(&w->key, k);
2475
2476 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2477 array_free(&buf->freelist, w);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002478 else
2479 refill->nr_found++;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002480
2481 if (array_freelist_empty(&buf->freelist))
2482 ret = MAP_DONE;
2483
2484 spin_unlock(&buf->lock);
2485 }
2486out:
2487 buf->last_scanned = *k;
2488 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002489}
2490
2491void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
Kent Overstreet72c27062013-06-05 06:24:39 -07002492 struct bkey *end, keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002493{
2494 struct bkey start = buf->last_scanned;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002495 struct refill refill;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002496
2497 cond_resched();
2498
Kent Overstreetb54d6932013-07-24 18:04:18 -07002499 bch_btree_op_init(&refill.op, -1);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002500 refill.nr_found = 0;
2501 refill.buf = buf;
2502 refill.end = end;
2503 refill.pred = pred;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002504
2505 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2506 refill_keybuf_fn, MAP_END_KEY);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002507
Kent Overstreet48a915a2013-10-31 15:43:22 -07002508 trace_bcache_keyscan(refill.nr_found,
2509 KEY_INODE(&start), KEY_OFFSET(&start),
2510 KEY_INODE(&buf->last_scanned),
2511 KEY_OFFSET(&buf->last_scanned));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002512
2513 spin_lock(&buf->lock);
2514
2515 if (!RB_EMPTY_ROOT(&buf->keys)) {
2516 struct keybuf_key *w;
2517 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2518 buf->start = START_KEY(&w->key);
2519
2520 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2521 buf->end = w->key;
2522 } else {
2523 buf->start = MAX_KEY;
2524 buf->end = MAX_KEY;
2525 }
2526
2527 spin_unlock(&buf->lock);
2528}
2529
2530static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2531{
2532 rb_erase(&w->node, &buf->keys);
2533 array_free(&buf->freelist, w);
2534}
2535
2536void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2537{
2538 spin_lock(&buf->lock);
2539 __bch_keybuf_del(buf, w);
2540 spin_unlock(&buf->lock);
2541}
2542
2543bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2544 struct bkey *end)
2545{
2546 bool ret = false;
2547 struct keybuf_key *p, *w, s;
2548 s.key = *start;
2549
2550 if (bkey_cmp(end, &buf->start) <= 0 ||
2551 bkey_cmp(start, &buf->end) >= 0)
2552 return false;
2553
2554 spin_lock(&buf->lock);
2555 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2556
2557 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2558 p = w;
2559 w = RB_NEXT(w, node);
2560
2561 if (p->private)
2562 ret = true;
2563 else
2564 __bch_keybuf_del(buf, p);
2565 }
2566
2567 spin_unlock(&buf->lock);
2568 return ret;
2569}
2570
2571struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2572{
2573 struct keybuf_key *w;
2574 spin_lock(&buf->lock);
2575
2576 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2577
2578 while (w && w->private)
2579 w = RB_NEXT(w, node);
2580
2581 if (w)
2582 w->private = ERR_PTR(-EINTR);
2583
2584 spin_unlock(&buf->lock);
2585 return w;
2586}
2587
2588struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002589 struct keybuf *buf,
2590 struct bkey *end,
2591 keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002592{
2593 struct keybuf_key *ret;
2594
2595 while (1) {
2596 ret = bch_keybuf_next(buf);
2597 if (ret)
2598 break;
2599
2600 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2601 pr_debug("scan finished");
2602 break;
2603 }
2604
Kent Overstreet72c27062013-06-05 06:24:39 -07002605 bch_refill_keybuf(c, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002606 }
2607
2608 return ret;
2609}
2610
Kent Overstreet72c27062013-06-05 06:24:39 -07002611void bch_keybuf_init(struct keybuf *buf)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002612{
Kent Overstreetcafe5632013-03-23 16:11:31 -07002613 buf->last_scanned = MAX_KEY;
2614 buf->keys = RB_ROOT;
2615
2616 spin_lock_init(&buf->lock);
2617 array_allocator_init(&buf->freelist);
2618}
2619
2620void bch_btree_exit(void)
2621{
2622 if (btree_io_wq)
2623 destroy_workqueue(btree_io_wq);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002624}
2625
2626int __init bch_btree_init(void)
2627{
Kent Overstreet72a44512013-10-24 17:19:26 -07002628 btree_io_wq = create_singlethread_workqueue("bch_btree_io");
2629 if (!btree_io_wq)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002630 return -ENOMEM;
2631
2632 return 0;
2633}