#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
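 *
 * A rough sketch of the reuse path (the real helpers, bch_inc_gen() and
 * ptr_stale(), are declared later in this header; this illustrative version
 * elides locking and the batched gen write):
 *
 *	uint8_t example_invalidate_bucket(struct bucket *b)
 *	{
 *		return ++b->gen;
 *	}
 *
 * After this, every pointer still carrying the old gen compares unequal and
 * is treated as invalid, so the bucket can be handed back to the allocator.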
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
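 *
 * Concretely (illustrative numbers): a key with inode 5, offset 1024 and
 * size 16 describes sectors [1008, 1024) of inode 5 - in terms of the
 * KEY_OFFSET()/KEY_SIZE() accessors from linux/bcache.h, an extent always
 * covers [KEY_OFFSET - KEY_SIZE, KEY_OFFSET). Since keys sort by their end,
 * a lookup for a given sector can stop at the first key whose offset is
 * strictly greater than the search target.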
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
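 *
 * A hedged sketch of the replace semantics (pseudocode, not the actual
 * insert path - the names here are purely illustrative):
 *
 *	bool example_btree_replace(index, old_key, new_key)
 *	{
 *		if (!index_contains_exact_match(index, old_key))
 *			return false;	(lost a race; drop new_key)
 *		overwrite(index, new_key);
 *		return true;
 *	}
 *
 * This is what keeps, say, a cache miss's read-around data from clobbering
 * newer data that a foreground write inserted while the miss was in flight.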
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
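 *
 * So a node lookup is conceptually (a sketch - the real iterator code lives
 * in bset.c and merge-sorts the per-set results):
 *
 *	for (each sorted set s in the node, newest first)
 *		binary search s for the target key;
 *	merge the per-set results, preferring keys from newer sets;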
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 * and it puts more strain on the various btree resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
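 *
 * Roughly, the fast path then looks like this (a simplified sketch, ignoring
 * error handling and the real function names):
 *
 *	insert the keys into the in-memory leaf node;
 *	append the same keys to the journal (small sequential write);
 *	on journal write completion, complete the foreground write;
 *	write out the dirty leaf later, once ~4k of appended keys has
 *	accumulated - safe, since the journal replays them after a crash;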
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		disk_gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint8_t		gc_gen;
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	0
#define GC_MARK_DIRTY		1
#define GC_MARK_METADATA	2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
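
/*
 * Resulting layout of the 16 bit gc_mark field, per the BITMASK()s above:
 *
 *	bits  0-1	GC_MARK		(reclaimable/dirty/metadata)
 *	bits  2-14	GC_SECTORS_USED	(valid sectors GC found in the bucket)
 *	bit   15	GC_MOVE		(bucket is a moving GC candidate)
 */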

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bio_split_pool {
	struct bio_set		*bio_split;
	mempool_t		*bio_split_hook;
};

struct bio_split_hook {
	struct closure		cl;
	struct bio_split_pool	*p;
	struct bio		*bio;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING	0
#define BCACHE_DEV_DETACHING	1
#define BCACHE_DEV_UNLINK_DONE	2

	unsigned		nr_stripes;
	unsigned		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	unsigned long		sectors_dirty_last;
	long			sectors_dirty_derivative;

	struct bio_set		*bio_split;

	unsigned		data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);

	struct bio_split_pool	bio_split_hook;
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned		sequential;
	sector_t		last;
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	atomic_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * showed up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/*
	 * Internal to the writeback code, so read_dirty() can keep track of
	 * where it's at.
	 */
	sector_t		last_read;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;

	struct keybuf		writeback_keys;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		verify:1;
	unsigned		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_derivative;
	int64_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_d_term;
	unsigned		writeback_rate_p_term_inverse;
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_buckets[] contains the last buckets we wrote priorities
	 * to (so gc can mark them as metadata), prio_next[] contains the
	 * buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 *
	 * unused: GC found nothing pointing into these buckets (possibly
	 * because all the data they contained was overwritten), so we only
	 * need to discard them before they can be moved to the free list.
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);
	DECLARE_FIFO(long, unused);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
	 * call prio_write() to keep gens from wrapping.
	 */
	uint8_t			need_save_prio;

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned		invalidate_needs_gc:1;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	struct bio_split_pool	bio_split_hook;
};

struct gc_stat {
	size_t			nodes;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned		in_use;	/* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache
 * set; we'll continue to run normally for a while with CACHE_SET_STOPPING set
 * (i.e. flushing dirty data).
 */
#define CACHE_SET_UNREGISTERING		0
#define CACHE_SET_STOPPING		1

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		*search;
	mempool_t		*bio_meta;
	struct bio_set		*bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned		bucket_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation. However, only one thread can be doing this
	 * at a time, for obvious reasons - try_harder and try_wait are
	 * basically a lock for this that we can wait on asynchronously. The
	 * btree_root() macro releases the lock when it returns.
	 */
	struct task_struct	*try_harder;
	wait_queue_head_t	try_wait;
	uint64_t		try_harder_start;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
	 * to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;

	wait_queue_head_t	moving_gc_wait;
	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t		*fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;
	struct time_stats	try_harder_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
	unsigned		error_limit;
	unsigned		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)				\
	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)					\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
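
/*
 * Example (assuming 1024 sector, i.e. 512k, buckets, so bucket_bits == 10):
 * sector_to_bucket(c, 5000) == 4, bucket_to_sector(c, 4) == 4096, and
 * bucket_remainder(c, 5000) == 904 - sector 5000 is 904 sectors into the
 * fifth bucket. Note bucket_remainder() relies on bucket_size being a power
 * of two.
 */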

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;
	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}
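
/*
 * gen_after() is wraparound-safe: gens are 8 bit, so 0 follows 255, and the
 * modular subtraction above does the right thing as long as the two gens are
 * less than 128 apart - which the BUCKET_GC_GEN_MAX/BUCKET_DISK_GEN_MAX
 * limits below guarantee. E.g. gen_after(2, 250) == 8, and ptr_stale() > 0
 * simply means the bucket has been reused (its gen incremented) since the
 * pointer was written.
 */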

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
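
/*
 * Usage sketch for the looping macros (wake_up_allocators() below is a real
 * instance of the for_each_cache() pattern; the bucket loop body here is
 * purely illustrative):
 *
 *	struct cache *ca;
 *	struct bucket *b;
 *	unsigned i;
 *
 *	for_each_cache(ca, c, i)
 *		for_each_bucket(b, ca)
 *			b->prio = INITIAL_PRIO;
 */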

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!atomic_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic_inc();
	return true;
}
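
/*
 * Callers must check cached_dev_get()'s return value - it fails once the
 * refcount has already hit zero (i.e. the device is detaching), in which
 * case I/O should go straight to the backing device - and pair every
 * successful get with a cached_dev_put().
 */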

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 *
 * bucket_disk_gen() returns the difference between the current gen and the gen
 * on disk; they're both used to make sure gens don't wrap around.
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

static inline uint8_t bucket_disk_gen(struct bucket *b)
{
	return b->gen - b->disk_gen;
}

#define BUCKET_GC_GEN_MAX	96U
#define BUCKET_DISK_GEN_MAX	64U

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

/* Forward declarations */

void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      int, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);

int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */