/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * writeback cache policy supporting flushing out dirty cache blocks.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "cache cleaner"

/* Cache entry struct. */
struct wb_cache_entry {
	struct list_head list;
	struct hlist_node hlist;

	dm_oblock_t oblock;
	dm_cblock_t cblock;
	bool dirty:1;
	bool pending:1;
};

struct hash {
	struct hlist_head *table;
	dm_block_t hash_bits;
	unsigned nr_buckets;
};

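/*
 * Every preallocated wb_cache_entry lives on exactly one of the four
 * lists below: on @free until it is handed out, then on @clean or
 * @dirty according to its state, and on @clean_pending while a
 * writeback issued by wb_writeback_work() is in flight.
 */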
struct policy {
	struct dm_cache_policy policy;
	spinlock_t lock;

	struct list_head free;
	struct list_head clean;
	struct list_head clean_pending;
	struct list_head dirty;

	/*
	 * We know exactly how many cblocks will be needed,
	 * so we can allocate them up front.
	 */
	dm_cblock_t cache_size, nr_cblocks_allocated;
	struct wb_cache_entry *cblocks;
	struct hash chash;
};

/*----------------------------------------------------------------------------*/

/*
 * Low-level functions.
 */
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

static struct policy *to_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct policy, policy);
}

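/*
 * Remove and return the first element of @q.  Callers must ensure
 * @q is not empty.
 */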
static struct list_head *list_pop(struct list_head *q)
{
	struct list_head *r = q->next;

	list_del(r);

	return r;
}

/*----------------------------------------------------------------------------*/

/* Allocate/free various resources. */
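/*
 * The hash table is sized at roughly one bucket per 16 entries
 * (minimum 16 buckets), rounded up to a power of two so hash_64()
 * can index it with hash_bits bits.
 */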
static int alloc_hash(struct hash *hash, unsigned elts)
{
	hash->nr_buckets = next_power(elts >> 4, 16);
	hash->hash_bits = __ffs(hash->nr_buckets);
	hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

	return hash->table ? 0 : -ENOMEM;
}

static void free_hash(struct hash *hash)
{
	vfree(hash->table);
}

static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
	int r = -ENOMEM;

	p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
	if (p->cblocks) {
		unsigned u = from_cblock(cache_size);

		while (u--)
			list_add(&p->cblocks[u].list, &p->free);

		p->nr_cblocks_allocated = 0;

		/* Cache entries hash. */
		r = alloc_hash(&p->chash, from_cblock(cache_size));
		if (r)
			vfree(p->cblocks);
	}

	return r;
}

static void free_cache_blocks_and_hash(struct policy *p)
{
	free_hash(&p->chash);
	vfree(p->cblocks);
}

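/*
 * All wb_cache_entry structs are preallocated in wb_create(), so
 * "allocation" is just popping the free list; the BUG_ON() catches
 * attempts to hand out more entries than there are cache blocks.
 */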
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
	struct wb_cache_entry *e;

	BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

	e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

	return e;
}

/*----------------------------------------------------------------------------*/

/* Hash functions (lookup, insert, remove). */
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
	struct hash *hash = &p->chash;
	unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
	struct wb_cache_entry *cur;
	struct hlist_head *bucket = &hash->table[h];

	hlist_for_each_entry(cur, bucket, hlist) {
		if (cur->oblock == oblock) {
			/* Move to the front of the bucket for faster subsequent lookups. */
			hlist_del(&cur->hlist);
			hlist_add_head(&cur->hlist, bucket);
			return cur;
		}
	}

	return NULL;
}

static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

	hlist_add_head(&e->hlist, &p->chash.table[h]);
}

static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
	hlist_del(&e->hlist);
}

/* Public interface (see dm-cache-policy.h). */
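/*
 * Note the cleaner policy never promotes: wb_map() only reports a hit
 * for an already cached block or a miss, so new blocks are never
 * pulled into the cache.
 */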
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_locker *locker,
		  struct policy_result *result)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	result->op = POLICY_MISS;

	if (can_block)
		spin_lock_irqsave(&p->lock, flags);
	else if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		result->op = POLICY_HIT;
		result->cblock = e->cblock;
	}

	spin_unlock_irqrestore(&p->lock, flags);

	return 0;
}

static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		*cblock = e->cblock;
		r = 0;
	} else
		r = -ENOENT;

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}

static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;

	e = lookup_cache_entry(p, oblock);
	BUG_ON(!e);

	if (set) {
		if (!e->dirty) {
			e->dirty = true;
			list_move(&e->list, &p->dirty);
		}
	} else {
		if (e->dirty) {
			e->pending = false;
			e->dirty = false;
			list_move(&e->list, &p->clean);
		}
	}
}

static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, true);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, false);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
	insert_cache_hash_entry(p, e);
	if (e->dirty)
		list_add(&e->list, &p->dirty);
	else
		list_add(&e->list, &p->clean);
}

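/*
 * Called for each mapping when the cache target loads its metadata.
 * This policy keeps no hints (get_hint is NULL), so @hint and
 * @hint_valid are ignored.
 */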
static int wb_load_mapping(struct dm_cache_policy *pe,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e = alloc_cache_entry(p);

	if (e) {
		e->cblock = cblock;
		e->oblock = oblock;
		e->dirty = false; /* blocks default to clean */
		add_cache_entry(p, e);
		r = 0;
	} else
		r = -ENOMEM;

	return r;
}

static void wb_destroy(struct dm_cache_policy *pe)
{
	struct policy *p = to_policy(pe);

	free_cache_blocks_and_hash(p);
	kfree(p);
}

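/*
 * Unlink @oblock's entry from both the hash and whichever of the
 * entry lists it is on; the mapping must exist.
 */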
static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
	struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

	BUG_ON(!r);

	remove_cache_hash_entry(r);
	list_del(&r->list);

	return r;
}

static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, oblock);
	list_add_tail(&e->list, &p->free);
	BUG_ON(!from_cblock(p->nr_cblocks_allocated));
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_force_mapping(struct dm_cache_policy *pe,
			     dm_oblock_t current_oblock, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, current_oblock);
	e->oblock = oblock;
	add_cache_entry(p, e);
	spin_unlock_irqrestore(&p->lock, flags);
}

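/*
 * Pop a dirty entry and park it on the clean_pending list; it moves
 * to the clean list once the writeback completes and wb_clear_dirty()
 * is called for its block.
 */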
static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
	struct list_head *l;
	struct wb_cache_entry *r;

	if (list_empty(&p->dirty))
		return NULL;

	l = list_pop(&p->dirty);
	r = container_of(l, struct wb_cache_entry, list);
	list_add(l, &p->clean_pending);

	return r;
}

static int wb_writeback_work(struct dm_cache_policy *pe,
			     dm_oblock_t *oblock,
			     dm_cblock_t *cblock,
			     bool critical_only)
{
	int r = -ENOENT;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);

	e = get_next_dirty_entry(p);
	if (e) {
		*oblock = e->oblock;
		*cblock = e->cblock;
		r = 0;
	}

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}

static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
	return to_policy(pe)->nr_cblocks_allocated;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
	p->policy.destroy = wb_destroy;
	p->policy.map = wb_map;
	p->policy.lookup = wb_lookup;
	p->policy.set_dirty = wb_set_dirty;
	p->policy.clear_dirty = wb_clear_dirty;
	p->policy.load_mapping = wb_load_mapping;
	p->policy.get_hint = NULL;
	p->policy.remove_mapping = wb_remove_mapping;
	p->policy.writeback_work = wb_writeback_work;
	p->policy.force_mapping = wb_force_mapping;
	p->policy.residency = wb_residency;
	p->policy.tick = NULL;
}

static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	int r;
	struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	init_policy_functions(p);
	INIT_LIST_HEAD(&p->free);
	INIT_LIST_HEAD(&p->clean);
	INIT_LIST_HEAD(&p->clean_pending);
	INIT_LIST_HEAD(&p->dirty);

	p->cache_size = cache_size;
	spin_lock_init(&p->lock);

	/* Allocate cache entry structs and add them to the free list. */
	r = alloc_cache_blocks_with_hash(p, cache_size);
	if (!r)
		return &p->policy;

	kfree(p);

	return NULL;
}

/*----------------------------------------------------------------------------*/

static struct dm_cache_policy_type wb_policy_type = {
	.name = "cleaner",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = wb_create
};
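
/*
 * The policy is selected by name from the cache target's table line.
 * An illustrative example (device names and sizes are placeholders, not
 * taken from this file):
 *
 *   dmsetup create cleaned --table \
 *     "0 409600 cache /dev/mapper/meta /dev/mapper/ssd /dev/sdb 512 1 writeback cleaner 0"
 *
 * i.e. <metadata dev> <cache dev> <origin dev> <block size> <#feature args>
 * <feature arg>* <policy> <#policy args>, with the cleaner policy taking no
 * arguments.
 */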

static int __init wb_init(void)
{
	int r = dm_cache_policy_register(&wb_policy_type);

	if (r < 0)
		DMERR("register failed %d", r);
	else
		DMINFO("version %u.%u.%u loaded",
		       wb_policy_type.version[0],
		       wb_policy_type.version[1],
		       wb_policy_type.version[2]);

	return r;
}

static void __exit wb_exit(void)
{
	dm_cache_policy_unregister(&wb_policy_type);
}

module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");