/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

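/*
 * Advance iterator set @i to its next key; if that exhausts the set, drop it
 * by overwriting it with the iterator's last set.
 */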
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}

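/*
 * Heap comparison function for sorting plain (non-extent) keys: returns true
 * if l > r, with ties broken by address so that duplicate keys are returned
 * newest first (see bch_extent_sort_cmp below).
 */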
static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}

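/*
 * Check that every available pointer references a valid bucket on its device
 * and that the key doesn't extend past the end of that bucket.
 */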
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket < ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}

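/*
 * Debug helper: print a key along with the buckets it points into and their
 * priorities.
 */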
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	printk(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		printk(" bucket %zu", n);

		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
			printk(" prio %i",
			       PTR_BUCKET(b->c, k, j)->prio);
	}

	printk(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

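/*
 * A btree node pointer is invalid if it has no pointers, has zero size, or
 * is marked dirty - the dirty bit is only meaningful for extents.
 */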
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}

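/*
 * Cross-check a btree node pointer against bucket state: the buckets it
 * points into must have btree priority and (if gc marks are valid) be marked
 * as metadata. Skipped if the bucket lock can't be taken without blocking.
 */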
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
		  "inconsistent btree pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}

static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};

/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

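/*
 * Fix up overlapping extents from different sets as they come off the heap:
 * the newer extent wins, so the older one is dropped, trimmed, or split (in
 * which case the non-overlapping front fragment is returned via @tmp).
 */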
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}

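/*
 * Handle overlaps between @insert and the existing extents, trimming or
 * splitting the old keys and keeping dirty sector counts consistent. For
 * replace operations (@replace_key != NULL) verify that what we're
 * overwriting is really @replace_key; returns true if @insert should be
 * dropped because none of @replace_key was found.
 */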
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

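	/*
	 * Nested function (gcc extension): adjust the cached device's dirty
	 * sector count when (part of) a dirty extent is overwritten.
	 */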
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
						     offset, -sectors);
	}

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;

				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
	}

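	/*
	 * Replace operations: bail out (dropping @insert) if none of
	 * @replace_key was found; if it was only partially found, trim
	 * @insert down to the sectors that were.
	 */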
check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}

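/*
 * KEY_OFFSET is the end of an extent, so the start is offset - size; a size
 * greater than the offset would mean the extent starts before sector 0.
 */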
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(b->c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
	return true;
}

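/*
 * Cross-check an extent pointer against bucket state: a dirty extent must
 * point at a bucket gc marked dirty, and no extent may point at a metadata
 * (btree priority) bucket. Skipped if the bucket lock can't be taken without
 * blocking.
 */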
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    ((GC_MARK(g) != GC_MARK_DIRTY &&
		      KEY_DIRTY(k)) ||
		     GC_MARK(g) == GC_MARK_METADATA))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
		  "inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}

static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	struct bucket *g;
	unsigned i, stale;

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
		return false;

	for (i = 0; i < KEY_PTRS(k); i++) {
		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}

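/*
 * A key's checksum lives in the u64 slot just past its last pointer; merged
 * checksums are combined by addition, with the result kept to 63 bits.
 */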
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

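/*
 * Try to merge r into l: l must end exactly where r starts, with matching
 * pointer counts and dirty bits, and each of r's pointers continuing l's
 * within the same bucket. Returns true if r was fully merged into l.
 */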
static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	if (KEY_PTRS(l) != KEY_PTRS(r) ||
	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/*
	 * Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};