/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * writeback cache policy supporting flushing out dirty cache blocks.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "cache cleaner"

/* Cache entry struct. */
struct wb_cache_entry {
	struct list_head list;
	struct hlist_node hlist;

	dm_oblock_t oblock;
	dm_cblock_t cblock;
	bool dirty:1;
	bool pending:1;
};

struct hash {
	struct hlist_head *table;
	dm_block_t hash_bits;
	unsigned nr_buckets;
};

struct policy {
	struct dm_cache_policy policy;
	spinlock_t lock;

	struct list_head free;
	struct list_head clean;
	struct list_head clean_pending;
	struct list_head dirty;

	/*
	 * We know exactly how many cblocks will be needed,
	 * so we can allocate them up front.
	 */
	dm_cblock_t cache_size, nr_cblocks_allocated;
	struct wb_cache_entry *cblocks;
	struct hash chash;
};

/*----------------------------------------------------------------------------*/

/*
 * Low-level functions.
 */
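/*
 * Round n up to the next power of two, but no lower than min.
 * A power-of-two bucket count lets hash_64() index the hash table
 * with hash_bits bits directly.
 */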
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

static struct policy *to_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct policy, policy);
}

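/*
 * Remove and return the first element of a list, which must not be
 * empty.
 */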
static struct list_head *list_pop(struct list_head *q)
{
	struct list_head *r = q->next;

	list_del(r);

	return r;
}

/*----------------------------------------------------------------------------*/

/* Allocate/free various resources. */
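/*
 * Size the table at roughly one bucket per 16 entries, with a floor
 * of 16 buckets.  nr_buckets is a power of two, so __ffs() yields
 * log2(nr_buckets) for use as the hash width.
 */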
static int alloc_hash(struct hash *hash, unsigned elts)
{
	hash->nr_buckets = next_power(elts >> 4, 16);
	hash->hash_bits = __ffs(hash->nr_buckets);
	hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

	return hash->table ? 0 : -ENOMEM;
}

static void free_hash(struct hash *hash)
{
	vfree(hash->table);
}

static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
	int r = -ENOMEM;

	p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
	if (p->cblocks) {
		unsigned u = from_cblock(cache_size);

		while (u--)
			list_add(&p->cblocks[u].list, &p->free);

		p->nr_cblocks_allocated = 0;

		/* Cache entries hash. */
		r = alloc_hash(&p->chash, from_cblock(cache_size));
		if (r)
			vfree(p->cblocks);
	}

	return r;
}

static void free_cache_blocks_and_hash(struct policy *p)
{
	free_hash(&p->chash);
	vfree(p->cblocks);
}

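/*
 * Take a preallocated entry off the free list.  All entries are
 * allocated up front in alloc_cache_blocks_with_hash(), so running
 * out indicates a bug.
 */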
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
	struct wb_cache_entry *e;

	BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

	e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

	return e;
}

/*----------------------------------------------------------------------------*/

/* Hash functions (lookup, insert, remove). */
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
	struct hash *hash = &p->chash;
	unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
	struct wb_cache_entry *cur;
	struct hlist_head *bucket = &hash->table[h];

	hlist_for_each_entry(cur, bucket, hlist) {
		if (cur->oblock == oblock) {
			/* Move to the front of the bucket for faster subsequent lookups. */
			hlist_del(&cur->hlist);
			hlist_add_head(&cur->hlist, bucket);
			return cur;
		}
	}

	return NULL;
}

static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

	hlist_add_head(&e->hlist, &p->chash.table[h]);
}

static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
	hlist_del(&e->hlist);
}

/* Public interface (see dm-cache-policy.h) */
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_locker *locker,
		  struct policy_result *result)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	result->op = POLICY_MISS;

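	/*
	 * lookup_cache_entry() moves hits to the front of their hash
	 * bucket, so the lock is needed even for this read.  If the
	 * caller can't block, contention is reported as -EWOULDBLOCK
	 * so the core can defer the bio and retry.
	 */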
	if (can_block)
		spin_lock_irqsave(&p->lock, flags);

	else if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		result->op = POLICY_HIT;
		result->cblock = e->cblock;

	}

	spin_unlock_irqrestore(&p->lock, flags);

	return 0;
}

static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		*cblock = e->cblock;
		r = 0;

	} else
		r = -ENOENT;

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}

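/*
 * Move an entry between the dirty and clean lists.  Called with
 * p->lock held.  Clearing the dirty state also clears the pending
 * flag, moving an entry whose writeback has completed from
 * clean_pending to the clean list.
 */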
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;

	e = lookup_cache_entry(p, oblock);
	BUG_ON(!e);

	if (set) {
		if (!e->dirty) {
			e->dirty = true;
			list_move(&e->list, &p->dirty);
		}

	} else {
		if (e->dirty) {
			e->pending = false;
			e->dirty = false;
			list_move(&e->list, &p->clean);
		}
	}
}

static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, true);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, false);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
	insert_cache_hash_entry(p, e);
	if (e->dirty)
		list_add(&e->list, &p->dirty);
	else
		list_add(&e->list, &p->clean);
}

static int wb_load_mapping(struct dm_cache_policy *pe,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e = alloc_cache_entry(p);

	if (e) {
		e->cblock = cblock;
		e->oblock = oblock;
		e->dirty = false; /* blocks default to clean */
		add_cache_entry(p, e);
		r = 0;

	} else
		r = -ENOMEM;

	return r;
}

static void wb_destroy(struct dm_cache_policy *pe)
{
	struct policy *p = to_policy(pe);

	free_cache_blocks_and_hash(p);
	kfree(p);
}

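/*
 * Unlink an entry from the hash table and whichever list it is on.
 * Called with p->lock held; the mapping must exist.
 */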
static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
	struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

	BUG_ON(!r);

	remove_cache_hash_entry(r);
	list_del(&r->list);

	return r;
}

static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, oblock);
	list_add_tail(&e->list, &p->free);
	BUG_ON(!from_cblock(p->nr_cblocks_allocated));
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_force_mapping(struct dm_cache_policy *pe,
			     dm_oblock_t current_oblock, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, current_oblock);
	e->oblock = oblock;
	add_cache_entry(p, e);
	spin_unlock_irqrestore(&p->lock, flags);
}

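/*
 * Pop the next dirty entry and park it on clean_pending while its
 * writeback is in flight; wb_clear_dirty() moves it to the clean
 * list once the core has written the block back.
 */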
static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
	struct list_head *l;
	struct wb_cache_entry *r;

	if (list_empty(&p->dirty))
		return NULL;

	l = list_pop(&p->dirty);
	r = container_of(l, struct wb_cache_entry, list);
	list_add(l, &p->clean_pending);

	return r;
}

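/*
 * Hand the core one dirty block to write back, or -ENOENT if there
 * is nothing left to clean.  critical_only is ignored: the cleaner
 * exists to write everything back.
 */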
static int wb_writeback_work(struct dm_cache_policy *pe,
			     dm_oblock_t *oblock,
			     dm_cblock_t *cblock,
			     bool critical_only)
{
	int r = -ENOENT;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);

	e = get_next_dirty_entry(p);
	if (e) {
		*oblock = e->oblock;
		*cblock = e->cblock;
		r = 0;
	}

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}

static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
	return to_policy(pe)->nr_cblocks_allocated;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
	p->policy.destroy = wb_destroy;
	p->policy.map = wb_map;
	p->policy.lookup = wb_lookup;
	p->policy.set_dirty = wb_set_dirty;
	p->policy.clear_dirty = wb_clear_dirty;
	p->policy.load_mapping = wb_load_mapping;
	p->policy.walk_mappings = NULL;
	p->policy.remove_mapping = wb_remove_mapping;
	p->policy.writeback_work = wb_writeback_work;
	p->policy.force_mapping = wb_force_mapping;
	p->policy.residency = wb_residency;
	p->policy.tick = NULL;
}

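/*
 * origin_size and cache_block_size are part of the policy creation
 * interface but unused here; the cleaner only needs cache_size to
 * preallocate its entries.
 */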
static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	int r;
	struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	init_policy_functions(p);
	INIT_LIST_HEAD(&p->free);
	INIT_LIST_HEAD(&p->clean);
	INIT_LIST_HEAD(&p->clean_pending);
	INIT_LIST_HEAD(&p->dirty);

	p->cache_size = cache_size;
	spin_lock_init(&p->lock);

	/* Allocate cache entry structs and add them to free list. */
	r = alloc_cache_blocks_with_hash(p, cache_size);
	if (!r)
		return &p->policy;

	kfree(p);

	return NULL;
}

/*----------------------------------------------------------------------------*/

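/*
 * hint_size reserves 4 bytes of per-cblock hint space in the cache
 * metadata; the cleaner itself neither writes nor interprets hints
 * (wb_load_mapping ignores hint/hint_valid, walk_mappings is NULL).
 */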
static struct dm_cache_policy_type wb_policy_type = {
	.name = "cleaner",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = wb_create
};

static int __init wb_init(void)
{
	int r = dm_cache_policy_register(&wb_policy_type);

	if (r < 0)
		DMERR("register failed %d", r);
	else
		DMINFO("version %u.%u.%u loaded",
		       wb_policy_type.version[0],
		       wb_policy_type.version[1],
		       wb_policy_type.version[2]);

	return r;
}

static void __exit wb_exit(void)
{
	dm_cache_policy_unregister(&wb_policy_type);
}

module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");