/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

struct dm_bio_prison_cell {
        struct hlist_node list;
        struct dm_bio_prison *prison;
        struct dm_cell_key key;
        struct bio *holder;
        struct bio_list bios;
};

struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;

        unsigned nr_buckets;
        unsigned hash_mask;
        struct hlist_head *cells;
};

/*----------------------------------------------------------------*/

static uint32_t calc_nr_buckets(unsigned nr_cells)
{
        uint32_t n = 128;

        nr_cells /= 4;
        nr_cells = min(nr_cells, 8192u);

        while (n < nr_cells)
                n <<= 1;

        return n;
}
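
/*
 * For example, nr_cells = 10000 gives nr_cells / 4 = 2500 (below the
 * 8192 cap), so n doubles from 128 up to 4096, the first power of two
 * >= 2500.
 */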

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
        unsigned i;
        uint32_t nr_buckets = calc_nr_buckets(nr_cells);
        size_t len = sizeof(struct dm_bio_prison) +
                     (sizeof(struct hlist_head) * nr_buckets);
        struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

        if (!prison)
                return NULL;

        spin_lock_init(&prison->lock);
        prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
        }

        prison->nr_buckets = nr_buckets;
        prison->hash_mask = nr_buckets - 1;
        prison->cells = (struct hlist_head *) (prison + 1);
        for (i = 0; i < nr_buckets; i++)
                INIT_HLIST_HEAD(prison->cells + i);

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);
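
/*
 * A minimal usage sketch: a hypothetical target that expects at most
 * 1024 bios detained at once.  Error handling beyond the NULL check is
 * the caller's business.
 *
 *      struct dm_bio_prison *prison = dm_bio_prison_create(1024);
 *
 *      if (!prison)
 *              return -ENOMEM;
 *      ...
 *      dm_bio_prison_destroy(prison);
 */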

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
        mempool_destroy(prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

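/*
 * Multiplicative hash: multiplying the block number by a large prime
 * scatters nearby blocks, and masking with hash_mask (nr_buckets - 1,
 * where nr_buckets is a power of two) keeps the result in range.
 */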
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
        const unsigned long BIG_PRIME = 4294967291UL;
        uint64_t hash = key->block * BIG_PRIME;

        return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
        return (lhs->virtual == rhs->virtual) &&
               (lhs->dev == rhs->dev) &&
               (lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
                                                  struct dm_cell_key *key)
{
        struct dm_bio_prison_cell *cell;
        struct hlist_node *tmp;

        hlist_for_each_entry(cell, tmp, bucket, list)
                if (keys_equal(&cell->key, key))
                        return cell;

        return NULL;
}

/*
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
                  struct bio *inmate, struct dm_bio_prison_cell **ref)
{
        int r = 1;
        unsigned long flags;
        uint32_t hash = hash_key(prison, key);
        struct dm_bio_prison_cell *cell, *cell2;

        BUG_ON(hash >= prison->nr_buckets);

        spin_lock_irqsave(&prison->lock, flags);

        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
                bio_list_add(&cell->bios, inmate);
                goto out;
        }

        /*
         * Allocate a new cell outside the lock, since mempool_alloc()
         * may block.
         */
        spin_unlock_irqrestore(&prison->lock, flags);
        cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
        spin_lock_irqsave(&prison->lock, flags);

        /*
         * We've been unlocked, so we have to double check that
         * nobody else has inserted this cell in the meantime.
         */
        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
                mempool_free(cell2, prison->cell_pool);
                bio_list_add(&cell->bios, inmate);
                goto out;
        }

        /*
         * Use new cell.
         */
        cell = cell2;

        cell->prison = prison;
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = inmate;
        bio_list_init(&cell->bios);
        hlist_add_head(&cell->list, prison->cells + hash);

        r = 0;

out:
        spin_unlock_irqrestore(&prison->lock, flags);

        *ref = cell;

        return r;
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
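
/*
 * A minimal usage sketch.  build_key(), remap_and_issue() and
 * deferred_bios are hypothetical, standing in for a caller's own key
 * construction, I/O path and requeue list:
 *
 *      struct dm_cell_key key;
 *      struct dm_bio_prison_cell *cell;
 *
 *      build_key(&key, bio);
 *      if (dm_bio_detain(prison, &key, bio, &cell))
 *              return;         // already held; bio now waits in the cell
 *
 *      remap_and_issue(bio);   // we are the holder
 *      dm_cell_release(cell, &deferred_bios);
 */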

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        struct dm_bio_prison *prison = cell->prison;

        hlist_del(&cell->list);

        if (inmates) {
                bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }

        mempool_free(cell, prison->cell_pool);
}

void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
        unsigned long flags;
        struct dm_bio_prison *prison = cell->prison;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, bios);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        struct dm_bio_prison *prison = cell->prison;

        hlist_del(&cell->list);
        bio_list_merge(inmates, &cell->bios);

        mempool_free(cell, prison->cell_pool);
}

void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        unsigned long flags;
        struct dm_bio_prison *prison = cell->prison;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(cell, inmates);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison_cell *cell)
{
        struct dm_bio_prison *prison = cell->prison;
        struct bio_list bios;
        struct bio *bio;
        unsigned long flags;

        bio_list_init(&bios);

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, &bios);
        spin_unlock_irqrestore(&prison->lock, flags);

        while ((bio = bio_list_pop(&bios)))
                bio_io_error(bio);
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*----------------------------------------------------------------*/
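
/*
 * The deferred set is a small ring of reference-counted entries.  Work
 * queued against an entry is only handed back (via __sweep()) once that
 * entry and every older entry have dropped to a zero count, giving
 * callers a cheap way to defer work until prior in-flight operations
 * have completed.
 */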

#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
        struct dm_deferred_set *ds;
        unsigned count;
        struct list_head work_items;
};

struct dm_deferred_set {
        spinlock_t lock;
        unsigned current_entry;
        unsigned sweeper;
        struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
        int i;
        struct dm_deferred_set *ds;

        ds = kmalloc(sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
        ds->sweeper = 0;
        for (i = 0; i < DEFERRED_SET_SIZE; i++) {
                ds->entries[i].ds = ds;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }

        return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
        kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
        unsigned long flags;
        struct dm_deferred_entry *entry;

        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        entry->count++;
        spin_unlock_irqrestore(&ds->lock, flags);

        return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
        return (index + 1) % DEFERRED_SET_SIZE;
}

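/*
 * Walk the sweeper forward over quiesced entries, splicing their
 * deferred work onto @head.  The sweep stops at the first entry with a
 * non-zero count; if it catches up with current_entry and that too is
 * quiesced, its work is collected as well.
 */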
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
                ds->sweeper = ds_next(ds->sweeper);
        }

        if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&entry->ds->lock, flags);
        BUG_ON(!entry->count);
        --entry->count;
        __sweep(entry->ds, head);
        spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if there is nothing
 * outstanding and the caller may run the job immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
        int r = 1;
        unsigned long flags;
        unsigned next_entry;

        spin_lock_irqsave(&ds->lock, flags);
        if ((ds->sweeper == ds->current_entry) &&
            !ds->entries[ds->current_entry].count)
                r = 0;
        else {
                list_add(work, &ds->entries[ds->current_entry].work_items);
                next_entry = ds_next(ds->current_entry);
                if (!ds->entries[next_entry].count)
                        ds->current_entry = next_entry;
        }
        spin_unlock_irqrestore(&ds->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
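
/*
 * A minimal usage sketch.  The struct job, issue_io() and
 * process_prepared() names are hypothetical:
 *
 *      // when I/O enters the target
 *      job->entry = dm_deferred_entry_inc(ds);
 *      issue_io(job);
 *
 *      // to defer work until earlier I/O has quiesced
 *      if (!dm_deferred_set_add_work(ds, &work->list))
 *              process_prepared(work);         // nothing in flight, run now
 *
 *      // when the I/O completes
 *      dm_deferred_entry_dec(job->entry, &completed);
 *      ...then process any work spliced onto the completed list...
 */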

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

static void __exit dm_bio_prison_exit(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");