/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/random.h>
#include <linux/prefetch.h>

/* Keylists */

int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + 2 + nptrs;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	/* The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys created
	 * so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}
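
#if 0
/*
 * Illustrative keylist usage (not part of the original file), assuming the
 * inline helpers from bset.h: bch_keylist_init(), bch_keylist_add(),
 * bch_keylist_empty() and bch_keylist_free().
 */
static void keylist_example(struct cache_set *c, struct bkey *k)
{
	struct keylist l;

	bch_keylist_init(&l);	/* starts out backed by l.inline_keys */

	/* Make room for k's header plus its pointers before adding it: */
	if (bch_keylist_realloc(&l, KEY_PTRS(k), c))
		return;		/* flush what we have so far, retry later */

	bch_keylist_add(&l, k);	/* copies k to l.top and advances top */

	while (!bch_keylist_empty(&l))
		bch_keylist_pop_front(&l);

	bch_keylist_free(&l);
}
#endif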

/* Pointer validation */

static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_bkey_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_bkey_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->level) {
			if (KEY_DIRTY(k) ||
			    g->prio != BTREE_PRIO ||
			    (b->c->gc_mark_valid &&
			     GC_MARK(g) != GC_MARK_METADATA))
				goto err;

		} else {
			if (g->prio == BTREE_PRIO)
				goto err;

			if (KEY_DIRTY(k) &&
			    b->c->gc_mark_valid &&
			    GC_MARK(g) != GC_MARK_DIRTY)
				goto err;
		}
		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_bkey_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}

bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
	struct bucket *g;
	unsigned i, stale;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(b, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(b->c, k, i))
			return true;

		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    ptr_bad_expensive_checks(b, k, i))
			return true;
	}

	return false;
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
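
/*
 * Worked example (not from the original source): if k is KEY(1, 16, 8) -
 * sectors 8..15 of inode 1 - then bch_cut_front(&KEY(1, 12, 0), k) leaves
 * k covering sectors 12..15: KEY_OFFSET() is still 16, KEY_SIZE() is now 4,
 * and each pointer's offset has been advanced past the 4 dropped sectors.
 * bch_cut_back(&KEY(1, 12, 0), k) on the original key instead keeps sectors
 * 8..11: KEY_OFFSET() becomes 12 and KEY_SIZE() 4.
 */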

static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

/* Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
{
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	if (KEY_PTRS(l) != KEY_PTRS(r) ||
	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
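
/*
 * Worked example (not from the original source): l = KEY(1, 16, 8) and
 * r = KEY(1, 24, 8) are adjacent - l ends at sector 16, where r starts -
 * so if their single pointers land back to back in the same bucket,
 * bch_bkey_try_merge() rewrites l to KEY(1, 24, 16) and returns true;
 * the caller can then drop r.
 */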

/* Binary tree stuff for auxiliary search trees */

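/*
 * Given the array index j of a node in a binary tree laid out like a heap
 * (root at 1, children of j at 2j and 2j + 1, nodes 1..size-1), return the
 * index of the node that follows it in an inorder traversal - or 0 when j
 * was the last node.  inorder_next(0, size) returns the first node.
 */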
static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

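/*
 * Inverse of inorder_next(): the node visited immediately before j in an
 * inorder traversal, or 0 when j was the first node.
 */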
static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for sizes up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

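/*
 * Worked example (not from the original source): for size == 7 the tree
 * holds nodes 1..6 and extra == (7 - 4) << 1 == 6.  An inorder walk of the
 * array-heap visits nodes 4 2 5 1 6 3, and correspondingly
 * __to_inorder(j, 7, 6) maps j == 1..6 to 4 2 6 1 3 5, while
 * __inorder_to_tree() inverts that mapping.
 */
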
#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

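/*
 * For example (assuming BSET_CACHELINE == 128, as defined in bset.h): a tree
 * node j with to_inorder(j, t) == 5 and t->tree[j].m == 3 refers to the bkey
 * starting 5 * 128 + 3 * 8 bytes into t->data.
 */
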
static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bkey *k)
{
	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

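/*
 * Return bits shift..shift+63 of the 128-bit quantity (high << 64) | low;
 * the (high << 1) << (63 - shift) dance avoids shifting by 64 - which would
 * be undefined behaviour - when shift == 0.
 */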
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? node(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	if (t != b->sets) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->sets + MAX_BSETS)
		t++->size = 0;
}

static void bset_build_unwritten_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;

	bset_alloc_tree(b, t);

	if (t->tree != b->sets->tree + bset_tree_space(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
		t->size = 1;
	}
}

static void bset_build_written_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;
	struct bkey *k = t->data->start;
	unsigned j, cacheline = 1;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, end(t->data)),
			b->sets->tree + bset_tree_space(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) != cacheline)
			k = bkey_next(k);

		t->prev[j] = bkey_u64s(k);
		k = bkey_next(k);
		cacheline++;
		t->tree[j].m = bkey_to_cacheline_offset(k);
	}

	while (bkey_next(k) != end(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}

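/*
 * Called when a key in a written set was modified in place (e.g. trimmed by
 * bch_cut_front()/bch_cut_back()): any bkey_float nodes in the auxiliary
 * search tree that were computed from it are now stale, so recompute them.
 */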
void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		if (k < end(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == end(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
{
	struct bset_tree *t = &b->sets[b->nsets];
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	if (j < t->size &&
	    table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(k);
		}
	}

	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != end(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(k);
			t->size++;
		}
}

void bch_bset_init_next(struct btree *b)
{
	struct bset *i = write_block(b);

	if (i != b->sets[0].data) {
		b->sets[++b->nsets].data = i;
		i->seq = b->sets[0].data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic	= bset_magic(&b->c->sb);
	i->version	= 0;
	i->keys		= 0;

	bset_build_unwritten_tree(b);
}

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct btree *b,
						     struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	BUG_ON(!b->nsets &&
	       t->size < bkey_to_cacheline(t, end(t->data)));

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct btree *b,
						struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
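		/*
		 * Branchless prefetch of the node four levels below n: if
		 * n << 4 is past the end of the tree, p - t->size is
		 * non-negative, the arithmetic shift turns its sign bit into
		 * a mask of zero, and we harmlessly prefetch &t->tree[0]
		 * instead.
		 */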
		unsigned p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit trick
		 * to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = end(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = end(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return end(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(b, t, search);
	} else
		i = bset_search_write_set(b, t, search);

	if (expensive_debug_checks(b->c)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != end(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}


/* Btree iterator */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
				   struct bkey *search, struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= &b->sets[b->nsets]; start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, end(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, btree_iter_cmp);
		else
			heap_sift(iter, 0, btree_iter_cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

/* Mergesort */

static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}

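/*
 * While sorting a leaf node, different sets can contain overlapping extents.
 * Whenever the key about to be emitted (the top of the heap) overlaps the
 * next keys in the heap, trim the older key of each overlapping pair so that
 * the newer data wins.
 */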
Kent Overstreetcafe5632013-03-23 16:11:31 -0700958static void btree_sort_fixup(struct btree_iter *iter)
959{
960 while (iter->used > 1) {
961 struct btree_iter_set *top = iter->data, *i = top + 1;
Kent Overstreetcafe5632013-03-23 16:11:31 -0700962
963 if (iter->used > 2 &&
964 btree_iter_cmp(i[0], i[1]))
965 i++;
966
Kent Overstreet84786432013-09-23 23:17:35 -0700967 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
Kent Overstreetcafe5632013-03-23 16:11:31 -0700968 break;
969
Kent Overstreet84786432013-09-23 23:17:35 -0700970 if (!KEY_SIZE(i->k)) {
971 sort_key_next(iter, i);
972 heap_sift(iter, i - top, btree_iter_cmp);
973 continue;
974 }
975
976 if (top->k > i->k) {
977 if (bkey_cmp(top->k, i->k) >= 0)
978 sort_key_next(iter, i);
979 else
980 bch_cut_front(top->k, i->k);
981
982 heap_sift(iter, i - top, btree_iter_cmp);
983 } else {
984 /* can't happen because of comparison func */
985 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
986 bch_cut_back(&START_KEY(i->k), top->k);
987 }
Kent Overstreetcafe5632013-03-23 16:11:31 -0700988 }
989}
990
991static void btree_mergesort(struct btree *b, struct bset *out,
992 struct btree_iter *iter,
993 bool fixup, bool remove_stale)
994{
995 struct bkey *k, *last = NULL;
996 bool (*bad)(struct btree *, const struct bkey *) = remove_stale
997 ? bch_ptr_bad
998 : bch_ptr_invalid;
999
1000 while (!btree_iter_end(iter)) {
1001 if (fixup && !b->level)
1002 btree_sort_fixup(iter);
1003
1004 k = bch_btree_iter_next(iter);
1005 if (bad(b, k))
1006 continue;
1007
1008 if (!last) {
1009 last = out->start;
1010 bkey_copy(last, k);
1011 } else if (b->level ||
1012 !bch_bkey_try_merge(b, last, k)) {
1013 last = bkey_next(last);
1014 bkey_copy(last, k);
1015 }
1016 }
1017
1018 out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
1019
1020 pr_debug("sorted %i keys", out->keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001021}
1022
1023static void __btree_sort(struct btree *b, struct btree_iter *iter,
1024 unsigned start, unsigned order, bool fixup)
1025{
1026 uint64_t start_time;
1027 bool remove_stale = !b->written;
1028 struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
1029 order);
1030 if (!out) {
1031 mutex_lock(&b->c->sort_lock);
1032 out = b->c->sort;
1033 order = ilog2(bucket_pages(b->c));
1034 }
1035
1036 start_time = local_clock();
1037
1038 btree_mergesort(b, out, iter, fixup, remove_stale);
1039 b->nsets = start;
1040
1041 if (!fixup && !start && b->written)
1042 bch_btree_verify(b, out);
1043
1044 if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, so we can just swap buffers instead of doing a big
		 * memcpy().
		 */

		out->magic	= bset_magic(&b->c->sb);
		out->seq	= b->sets[0].data->seq;
		out->version	= b->sets[0].data->version;
		swap(out, b->sets[0].data);

		if (b->c->sort == b->sets[0].data)
			b->c->sort = out;
	} else {
		b->sets[start].data->keys = out->keys;
		memcpy(b->sets[start].data->start, out->start,
		       (void *) end(out) - (void *) out->start);
	}

	if (out == b->c->sort)
		mutex_unlock(&b->c->sort_lock);
	else
		free_pages((unsigned long) out, order);

	if (b->written)
		bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&b->c->sort_time, start_time);
}

void bch_btree_sort_partial(struct btree *b, unsigned start)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
	       (b->sets[b->nsets].size || b->nsets));

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		order = roundup_pow_of_two(__set_bytes(b->sets->data,
						       keys)) / PAGE_SIZE;
		if (order)
			order = ilog2(order);
	}

	__btree_sort(b, &iter, start, order, false);

	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
{
	BUG_ON(!b->written);
	__btree_sort(b, iter, 0, b->page_order, true);
}

void bch_btree_sort_into(struct btree *b, struct btree *new)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->sets->data, &iter, false, true);

	bch_time_stats_update(&b->c->sort_time, start_time);

	bkey_copy_key(&new->key, &b->key);
	new->sets->size = 0;
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree *b)
{
	unsigned crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	/* If not a leaf node, always sort */
	if (b->level) {
		bch_btree_sort(b);
		return;
	}

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= b->c->sort_crit_factor;

		if (b->sets[i].data->keys < crit) {
			bch_btree_sort_partial(b, i);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b);
		return;
	}

out:
	bset_build_written_tree(b);
}

/* Sysfs stuff */

struct bset_stats {
	struct btree_op op;
	size_t nodes;
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

static int btree_bset_stats(struct btree_op *op, struct btree *b)
{
	struct bset_stats *stats = container_of(op, struct bset_stats, op);
	unsigned i;

	stats->nodes++;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->sets[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}

	return MAP_CONTINUE;
}

int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats t;
	int ret;

	memset(&t, 0, sizeof(struct bset_stats));
	bch_btree_op_init(&t.op, -1);

	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			t.nodes,
			t.sets_written, t.sets_unwritten,
			t.bytes_written, t.bytes_unwritten,
			t.floats, t.failed);
}