/*
 * Copyright (C) 2011 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define DEFERRED_SET_SIZE 64
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
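
/*
 * Illustrative arithmetic: with 512 byte sectors (SECTOR_SHIFT == 9)
 * these limits work out to 128 and 2097152 sectors respectively.
 */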

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key, multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct bio_prison;

struct cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block;
};

struct cell {
	struct hlist_node list;
	struct bio_prison *prison;
	struct cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;

	unsigned nr_buckets;
	unsigned hash_mask;
	struct hlist_head *cells;
};

static uint32_t calc_nr_buckets(unsigned nr_cells)
{
	uint32_t n = 128;

	nr_cells /= 4;
	nr_cells = min(nr_cells, 8192u);

	while (n < nr_cells)
		n <<= 1;

	return n;
}
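
/*
 * Worked example of the sizing above (illustrative only): asking for
 * nr_cells == 1024 gives 1024 / 4 == 256 target buckets, and the first
 * power of two >= 256 is 256, so the prison hashes into 256 buckets
 * (hash_mask == 255).
 */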

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
static struct bio_prison *prison_create(unsigned nr_cells)
{
	unsigned i;
	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
	struct bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);
	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
							sizeof(struct cell));
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->cells = vmalloc(sizeof(*prison->cells) * nr_buckets);
	if (!prison->cells) {
		mempool_destroy(prison->cell_pool);
		kfree(prison);
		return NULL;
	}

	prison->nr_buckets = nr_buckets;
	prison->hash_mask = nr_buckets - 1;
	for (i = 0; i < nr_buckets; i++)
		INIT_HLIST_HEAD(prison->cells + i);

	return prison;
}

static void prison_destroy(struct bio_prison *prison)
{
	vfree(prison->cells);
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}

static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
{
	const unsigned long BIG_PRIME = 4294967291UL;
	uint64_t hash = key->block * BIG_PRIME;

	return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
{
	return (lhs->virtual == rhs->virtual) &&
	       (lhs->dev == rhs->dev) &&
	       (lhs->block == rhs->block);
}

static struct cell *__search_bucket(struct hlist_head *bucket,
				    struct cell_key *key)
{
	struct cell *cell;
	struct hlist_node *tmp;

	hlist_for_each_entry(cell, tmp, bucket, list)
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}

/*
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
static int bio_detain(struct bio_prison *prison, struct cell_key *key,
		      struct bio *inmate, struct cell **ref)
{
	int r = 1;
	unsigned long flags;
	uint32_t hash = hash_key(prison, key);
	struct cell *cell, *cell2;

	BUG_ON(hash > prison->nr_buckets);

	spin_lock_irqsave(&prison->lock, flags);

	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/*
	 * Allocate a new cell
	 */
	spin_unlock_irqrestore(&prison->lock, flags);
	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
	spin_lock_irqsave(&prison->lock, flags);

	/*
	 * We've been unlocked, so we have to double check that
	 * nobody else has inserted this cell in the meantime.
	 */
	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		mempool_free(cell2, prison->cell_pool);
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/*
	 * Use new cell.
	 */
	cell = cell2;

	cell->prison = prison;
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = inmate;
	bio_list_init(&cell->bios);
	hlist_add_head(&cell->list, prison->cells + hash);

	r = 0;

out:
	spin_unlock_irqrestore(&prison->lock, flags);

	*ref = cell;

	return r;
}
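
/*
 * Typical calling pattern, as a sketch (this mirrors what process_bio()
 * does further down; illustrative only):
 *
 *	struct cell *cell;
 *	struct cell_key key;
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (bio_detain(tc->pool->prison, &key, bio, &cell))
 *		return;		(bio now waits in the cell)
 *
 * The winner goes on to do the slow work, then hands every detained bio
 * back via cell_release(), cell_defer() or cell_error().
 */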

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct cell *cell, struct bio_list *inmates)
{
	struct bio_prison *prison = cell->prison;

	hlist_del(&cell->list);

	if (inmates) {
		bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}

	mempool_free(cell, prison->cell_pool);
}

static void cell_release(struct cell *cell, struct bio_list *bios)
{
	unsigned long flags;
	struct bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}

/*
 * There are a couple of places where we put a bio into a cell briefly
 * before taking it out again.  In these situations we know that no other
 * bio may be in the cell.  This function releases the cell, and also does
 * a sanity check.
 */
static void __cell_release_singleton(struct cell *cell, struct bio *bio)
{
	BUG_ON(cell->holder != bio);
	BUG_ON(!bio_list_empty(&cell->bios));

	__cell_release(cell, NULL);
}

static void cell_release_singleton(struct cell *cell, struct bio *bio)
{
	unsigned long flags;
	struct bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_singleton(cell, bio);
	spin_unlock_irqrestore(&prison->lock, flags);
}

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
{
	struct bio_prison *prison = cell->prison;

	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);

	mempool_free(cell, prison->cell_pool);
}

static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
{
	unsigned long flags;
	struct bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}

static void cell_error(struct cell *cell)
{
	struct bio_prison *prison = cell->prison;
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, &bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct deferred_set;
struct deferred_entry {
	struct deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct deferred_entry entries[DEFERRED_SET_SIZE];
};

static void ds_init(struct deferred_set *ds)
{
	int i;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}
}

static struct deferred_entry *ds_inc(struct deferred_set *ds)
{
	unsigned long flags;
	struct deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

static void __sweep(struct deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

static void ds_dec(struct deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}

/*
 * Returns 1 if the work was deferred, or 0 if there were no pending
 * items to delay the job (in which case the caller may proceed at once).
 */
static int ds_add_work(struct deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
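
/*
 * Lifecycle sketch (illustrative): a read to a shared block takes a
 * reference with ds_inc(&pool->shared_read_ds) (see process_shared_bio()
 * below) and drops it with ds_dec() when the read completes.  A write
 * that must break sharing calls ds_add_work(&pool->shared_read_ds,
 * &m->list) in schedule_copy(); once every reference counted ahead of
 * it has been dropped, __sweep() hands m->list back so the mapping can
 * be marked quiesced and queued via __maybe_add_mapping().
 */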

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct new_mapping;

struct pool_features {
	unsigned zero_new_blocks:1;
	unsigned discard_enabled:1;
	unsigned discard_passdown:1;
};

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	uint32_t sectors_per_block;
	unsigned block_shift;
	dm_block_t offset_mask;
	dm_block_t low_water_blocks;

	struct pool_features pf;
	unsigned low_water_triggered:1;	/* A dm event has been sent */
	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */

	struct bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned ref_count;
	unsigned long last_commit_jiffies;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct deferred_set shared_read_ds;
	struct deferred_set all_io_ds;

	struct new_mapping *next_mapping;
	mempool_t *mapping_pool;
	mempool_t *endio_hook_pool;
};

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features pf;
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct endio_hook {
	struct thin_c *tc;
	struct deferred_entry *shared_read_entry;
	struct deferred_entry *all_io_entry;
	struct new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	return bio->bi_sector >> tc->pool->block_shift;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;

	bio->bi_bdev = tc->pool_dev->bdev;
	bio->bi_sector = (block << pool->block_shift) +
			 (bio->bi_sector & pool->offset_mask);
}
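
/*
 * Worked example of the remapping arithmetic (illustrative): with
 * sectors_per_block == 128 we have block_shift == 7 and
 * offset_mask == 127, so a bio arriving at sector 300 lands in thin
 * block 300 >> 7 == 2; remapping it to data block 5 issues it at
 * sector (5 << 7) + (300 & 127) == 640 + 44 == 684.
 */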

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	/*
	 * Batch together any FUA/FLUSH bios we find and then issue
	 * a single commit for them in process_deferred_bios().
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irqsave(&pool->lock, flags);
		bio_list_add(&pool->deferred_flush_bios, bio);
		spin_unlock_irqrestore(&pool->lock, flags);
	} else
		generic_make_request(bio);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct new_mapping {
	struct list_head list;

	unsigned quiesced:1;
	unsigned prepared:1;
	unsigned pass_discard:1;

	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct cell *cell, *cell2;
	int err;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct cell *cell,
		       dm_block_t data_block)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&tc->pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits one particular detainee,
 * a write bio that covers the block and has already been processed.
 */
static void cell_defer_except(struct thin_c *tc, struct cell *cell)
{
	struct bio_list bios;
	struct pool *pool = tc->pool;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	cell_release_no_holder(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping(struct new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio)
		bio->bi_end_io = m->saved_bi_end_io;

	if (m->err) {
		cell_error(m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		DMERR("dm_thin_insert_block() failed");
		cell_error(m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_except(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell, m->data_block);

out:
	list_del(&m->list);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR("dm_thin_remove_block() failed");

	/*
	 * Pass the discard down to the underlying device?
	 */
	if (m->pass_discard)
		remap_and_issue(tc, m->bio, m->data_block);
	else
		bio_endio(m->bio, 0);

	cell_defer_except(tc, m->cell);
	cell_defer_except(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     void (*fn)(struct new_mapping *))
{
	unsigned long flags;
	struct list_head maps;
	struct new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		fn(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return !(bio->bi_sector & pool->offset_mask) &&
	       (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
	       io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct new_mapping *get_next_mapping(struct pool *pool)
{
	struct new_mapping *r = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	pool->next_mapping = NULL;

	return r;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 0;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	if (!ds_add_work(&pool->shared_read_ds, &m->list))
		m->quiesced = 1;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_copy() failed");
			cell_error(cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 1;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_block);

	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_zero() failed");
			cell_error(cell);
		}
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	unsigned long flags;
	struct pool *pool = tc->pool;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark, sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}

	if (!free_blocks) {
		if (pool->no_free_space)
			return -ENOSPC;
		else {
			/*
			 * Try to commit to see if that will free up some
			 * more space.
			 */
			r = dm_pool_commit_metadata(pool->pmd);
			if (r) {
				DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
				      __func__, r);
				return r;
			}

			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
			if (r)
				return r;

			/*
			 * If we still have no space we set a flag to avoid
			 * doing all this checking and return -ENOSPC.
			 */
			if (!free_blocks) {
				DMWARN("%s: no free space available.",
				       dm_device_name(pool->pool_md));
				spin_lock_irqsave(&pool->lock, flags);
				pool->no_free_space = 1;
				spin_unlock_irqrestore(&pool->lock, flags);
				return -ENOSPC;
			}
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r)
		return r;

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	cell_release(cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct cell *cell, *cell2;
	struct cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
			cell_release_singleton(cell, bio);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->err = 0;
			m->bio = bio;

			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			/*
			 * This path is hit if people are ignoring
			 * limits->discard_granularity.  It ignores any
			 * part of the discard that is in a subsequent
			 * block.
			 */
			sector_t offset = bio->bi_sector - (block << pool->block_shift);
			unsigned remaining = (pool->sectors_per_block - offset) << 9;
			bio->bi_size = min(bio->bi_size, remaining);

			cell_release_singleton(cell, bio);
			cell_release_singleton(cell2, bio);
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_release_singleton(cell, bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR("discard: find block unexpectedly returned %d", r);
		cell_release_singleton(cell, bio);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct cell *cell)
{
	int r;
	dm_block_t data_block;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		cell_error(cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct cell *cell;
	struct pool *pool = tc->pool;
	struct cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool->prison, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->shared_read_entry = ds_inc(&pool->shared_read_ds);

		cell_release_singleton(cell, bio);
		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct cell *cell)
{
	int r;
	dm_block_t data_block;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_size) {
		cell_release_singleton(cell, bio);
		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_release_singleton(cell, bio);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		cell_error(cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	dm_block_t block = get_bio_block(tc, bio);
	struct cell *cell;
	struct cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * We can release this cell now.  This thread is the only
		 * one that puts bios into a cell, and we know there were
		 * no preceding bios.
		 */
		/*
		 * TODO: this will probably have to change when discard goes
		 * back in.
		 */
		cell_release_singleton(cell, bio);

		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result);
		else
			remap_and_issue(tc, bio, lookup_result.block);
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			cell_release_singleton(cell, bio);
			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR("dm_thin_find_block() failed, error = %d", r);
		cell_release_singleton(cell, bio);
		bio_io_error(bio);
		break;
	}
}

static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
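
/*
 * For example, with COMMIT_PERIOD == HZ this fires roughly once a
 * second; the jiffies < last_commit_jiffies test also forces a commit
 * if jiffies has wrapped since the last one.
 */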
1429
Joe Thornber991d9fa2011-10-31 20:21:18 +00001430static void process_deferred_bios(struct pool *pool)
1431{
1432 unsigned long flags;
1433 struct bio *bio;
1434 struct bio_list bios;
1435 int r;
1436
1437 bio_list_init(&bios);
1438
1439 spin_lock_irqsave(&pool->lock, flags);
1440 bio_list_merge(&bios, &pool->deferred_bios);
1441 bio_list_init(&pool->deferred_bios);
1442 spin_unlock_irqrestore(&pool->lock, flags);
1443
1444 while ((bio = bio_list_pop(&bios))) {
Joe Thornbereb2aa482012-03-28 18:41:28 +01001445 struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
1446 struct thin_c *tc = h->tc;
1447
Joe Thornber991d9fa2011-10-31 20:21:18 +00001448 /*
1449 * If we've got no free new_mapping structs, and processing
1450 * this bio might require one, we pause until there are some
1451 * prepared mappings to process.
1452 */
1453 if (ensure_next_mapping(pool)) {
1454 spin_lock_irqsave(&pool->lock, flags);
Mike Snitzer354b2be2014-03-28 02:15:02 -04001455 bio_list_add(&pool->deferred_bios, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001456 bio_list_merge(&pool->deferred_bios, &bios);
1457 spin_unlock_irqrestore(&pool->lock, flags);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001458 break;
1459 }
Joe Thornber104655f2012-03-28 18:41:28 +01001460
1461 if (bio->bi_rw & REQ_DISCARD)
1462 process_discard(tc, bio);
1463 else
1464 process_bio(tc, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001465 }
1466
1467 /*
1468 * If there are any deferred flush bios, we must commit
1469 * the metadata before issuing them.
1470 */
1471 bio_list_init(&bios);
1472 spin_lock_irqsave(&pool->lock, flags);
1473 bio_list_merge(&bios, &pool->deferred_flush_bios);
1474 bio_list_init(&pool->deferred_flush_bios);
1475 spin_unlock_irqrestore(&pool->lock, flags);
1476
Joe Thornber905e51b2012-03-28 18:41:27 +01001477 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
Joe Thornber991d9fa2011-10-31 20:21:18 +00001478 return;
1479
1480 r = dm_pool_commit_metadata(pool->pmd);
1481 if (r) {
1482 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1483 __func__, r);
1484 while ((bio = bio_list_pop(&bios)))
1485 bio_io_error(bio);
1486 return;
1487 }
Joe Thornber905e51b2012-03-28 18:41:27 +01001488 pool->last_commit_jiffies = jiffies;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001489
1490 while ((bio = bio_list_pop(&bios)))
1491 generic_make_request(bio);
1492}
1493
1494static void do_worker(struct work_struct *ws)
1495{
1496 struct pool *pool = container_of(ws, struct pool, worker);
1497
Joe Thornber104655f2012-03-28 18:41:28 +01001498 process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
1499 process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001500 process_deferred_bios(pool);
1501}
1502
Joe Thornber905e51b2012-03-28 18:41:27 +01001503/*
1504 * We want to commit periodically so that not too much
1505 * unwritten data builds up.
1506 */
1507static void do_waker(struct work_struct *ws)
1508{
1509 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1510 wake_worker(pool);
1511 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1512}
1513
Joe Thornber991d9fa2011-10-31 20:21:18 +00001514/*----------------------------------------------------------------*/
1515
1516/*
1517 * Mapping functions.
1518 */
1519
1520/*
1521 * Called only while mapping a thin bio to hand it over to the workqueue.
1522 */
1523static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1524{
1525 unsigned long flags;
1526 struct pool *pool = tc->pool;
1527
1528 spin_lock_irqsave(&pool->lock, flags);
1529 bio_list_add(&pool->deferred_bios, bio);
1530 spin_unlock_irqrestore(&pool->lock, flags);
1531
1532 wake_worker(pool);
1533}
1534
Joe Thornbereb2aa482012-03-28 18:41:28 +01001535static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1536{
1537 struct pool *pool = tc->pool;
1538 struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1539
1540 h->tc = tc;
1541 h->shared_read_entry = NULL;
Joe Thornber104655f2012-03-28 18:41:28 +01001542 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
Joe Thornbereb2aa482012-03-28 18:41:28 +01001543 h->overwrite_mapping = NULL;
1544
1545 return h;
1546}
1547
Joe Thornber991d9fa2011-10-31 20:21:18 +00001548/*
1549 * Non-blocking function called from the thin target's map function.
1550 */
1551static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1552 union map_info *map_context)
1553{
1554 int r;
1555 struct thin_c *tc = ti->private;
1556 dm_block_t block = get_bio_block(tc, bio);
1557 struct dm_thin_device *td = tc->td;
1558 struct dm_thin_lookup_result result;
1559
Joe Thornbereb2aa482012-03-28 18:41:28 +01001560 map_context->ptr = thin_hook_bio(tc, bio);
Joe Thornber104655f2012-03-28 18:41:28 +01001561 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00001562 thin_defer_bio(tc, bio);
1563 return DM_MAPIO_SUBMITTED;
1564 }
1565
1566 r = dm_thin_find_block(td, block, 0, &result);
1567
1568 /*
1569 * Note that we defer readahead too.
1570 */
1571 switch (r) {
1572 case 0:
1573 if (unlikely(result.shared)) {
1574 /*
1575 * We have a race condition here between the
1576 * result.shared value returned by the lookup and
1577 * snapshot creation, which may cause new
1578 * sharing.
1579 *
1580 * To avoid this always quiesce the origin before
1581 * taking the snap. You want to do this anyway to
1582 * ensure a consistent application view
1583 * (i.e. lockfs).
1584 *
1585 * More distant ancestors are irrelevant. The
1586 * shared flag will be set in their case.
1587 */
1588 thin_defer_bio(tc, bio);
1589 r = DM_MAPIO_SUBMITTED;
1590 } else {
1591 remap(tc, bio, result.block);
1592 r = DM_MAPIO_REMAPPED;
1593 }
1594 break;
1595
1596 case -ENODATA:
1597 /*
1598 * In future, the failed dm_thin_find_block above could
1599 * provide the hint to load the metadata into cache.
1600 */
1601 case -EWOULDBLOCK:
1602 thin_defer_bio(tc, bio);
1603 r = DM_MAPIO_SUBMITTED;
1604 break;
1605 }
1606
1607 return r;
1608}
1609
1610static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1611{
1612 int r;
1613 unsigned long flags;
1614 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1615
1616 spin_lock_irqsave(&pt->pool->lock, flags);
1617 r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1618 spin_unlock_irqrestore(&pt->pool->lock, flags);
1619
1620 if (!r) {
1621 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1622 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1623 }
1624
1625 return r;
1626}
1627
1628static void __requeue_bios(struct pool *pool)
1629{
1630 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1631 bio_list_init(&pool->retry_on_resume_list);
1632}
1633
1634/*----------------------------------------------------------------
1635 * Binding of control targets to a pool object
1636 *--------------------------------------------------------------*/
1637static int bind_control_target(struct pool *pool, struct dm_target *ti)
1638{
1639 struct pool_c *pt = ti->private;
1640
1641 pool->ti = ti;
1642 pool->low_water_blocks = pt->low_water_blocks;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001643 pool->pf = pt->pf;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001644
Mike Snitzerf4026932012-05-19 01:01:01 +01001645 /*
1646 * If discard_passdown was enabled verify that the data device
1647 * supports discards. Disable discard_passdown if not; otherwise
1648 * -EOPNOTSUPP will be returned.
1649 */
1650 if (pt->pf.discard_passdown) {
1651 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1652 if (!q || !blk_queue_discard(q)) {
1653 char buf[BDEVNAME_SIZE];
1654 DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
1655 bdevname(pt->data_dev->bdev, buf));
1656 pool->pf.discard_passdown = 0;
1657 }
1658 }
1659
Joe Thornber991d9fa2011-10-31 20:21:18 +00001660 return 0;
1661}
1662
1663static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1664{
1665 if (pool->ti == ti)
1666 pool->ti = NULL;
1667}
1668
1669/*----------------------------------------------------------------
1670 * Pool creation
1671 *--------------------------------------------------------------*/
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001672/* Initialize pool features. */
1673static void pool_features_init(struct pool_features *pf)
1674{
1675 pf->zero_new_blocks = 1;
1676 pf->discard_enabled = 1;
1677 pf->discard_passdown = 1;
1678}
1679
Joe Thornber991d9fa2011-10-31 20:21:18 +00001680static void __pool_destroy(struct pool *pool)
1681{
1682 __pool_table_remove(pool);
1683
1684 if (dm_pool_metadata_close(pool->pmd) < 0)
1685 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1686
1687 prison_destroy(pool->prison);
1688 dm_kcopyd_client_destroy(pool->copier);
1689
1690 if (pool->wq)
1691 destroy_workqueue(pool->wq);
1692
1693 if (pool->next_mapping)
1694 mempool_free(pool->next_mapping, pool->mapping_pool);
1695 mempool_destroy(pool->mapping_pool);
1696 mempool_destroy(pool->endio_hook_pool);
1697 kfree(pool);
1698}
1699
1700static struct pool *pool_create(struct mapped_device *pool_md,
1701 struct block_device *metadata_dev,
1702 unsigned long block_size, char **error)
1703{
1704 int r;
1705 void *err_p;
1706 struct pool *pool;
1707 struct dm_pool_metadata *pmd;
1708
1709 pmd = dm_pool_metadata_open(metadata_dev, block_size);
1710 if (IS_ERR(pmd)) {
1711 *error = "Error creating metadata object";
1712 return (struct pool *)pmd;
1713 }
1714
1715 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1716 if (!pool) {
1717 *error = "Error allocating memory for pool";
1718 err_p = ERR_PTR(-ENOMEM);
1719 goto bad_pool;
1720 }
1721
1722 pool->pmd = pmd;
1723 pool->sectors_per_block = block_size;
	pool->block_shift = ffs(block_size) - 1;
	pool->offset_mask = block_size - 1;
	pool->low_water_blocks = 0;
	pool_features_init(&pool->pf);
	pool->prison = prison_create(PRISON_CELLS);
	if (!pool->prison) {
		*error = "Error creating pool's bio prison";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_prison;
	}

	pool->copier = dm_kcopyd_client_create();
	if (IS_ERR(pool->copier)) {
		r = PTR_ERR(pool->copier);
		*error = "Error creating pool's kcopyd client";
		err_p = ERR_PTR(r);
		goto bad_kcopyd_client;
	}

	/*
	 * Create singlethreaded workqueue that will service all devices
	 * that use this metadata.
	 */
	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!pool->wq) {
		*error = "Error creating pool's workqueue";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_wq;
	}

	INIT_WORK(&pool->worker, do_worker);
	INIT_DELAYED_WORK(&pool->waker, do_waker);
	spin_lock_init(&pool->lock);
	bio_list_init(&pool->deferred_bios);
	bio_list_init(&pool->deferred_flush_bios);
	INIT_LIST_HEAD(&pool->prepared_mappings);
	INIT_LIST_HEAD(&pool->prepared_discards);
	pool->low_water_triggered = 0;
	pool->no_free_space = 0;
	bio_list_init(&pool->retry_on_resume_list);
	ds_init(&pool->shared_read_ds);
	ds_init(&pool->all_io_ds);

	pool->next_mapping = NULL;
	pool->mapping_pool =
		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
	if (!pool->mapping_pool) {
		*error = "Error creating pool's mapping mempool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_mapping_pool;
	}

	pool->endio_hook_pool =
		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
	if (!pool->endio_hook_pool) {
		*error = "Error creating pool's endio_hook mempool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_endio_hook_pool;
	}
	pool->ref_count = 1;
	pool->last_commit_jiffies = jiffies;
	pool->pool_md = pool_md;
	pool->md_dev = metadata_dev;
	__pool_table_insert(pool);

	return pool;

bad_endio_hook_pool:
	mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
	destroy_workqueue(pool->wq);
bad_wq:
	dm_kcopyd_client_destroy(pool->copier);
bad_kcopyd_client:
	prison_destroy(pool->prison);
bad_prison:
	kfree(pool);
bad_pool:
	if (dm_pool_metadata_close(pmd))
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	return err_p;
}

static void __pool_inc(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	pool->ref_count++;
}

static void __pool_dec(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	BUG_ON(!pool->ref_count);
	if (!--pool->ref_count)
		__pool_destroy(pool);
}

static struct pool *__pool_find(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size, char **error,
				int *created)
{
	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);

	if (pool) {
		if (pool->pool_md != pool_md)
			return ERR_PTR(-EBUSY);
		__pool_inc(pool);

	} else {
		pool = __pool_table_lookup(pool_md);
		if (pool) {
			if (pool->md_dev != metadata_dev)
				return ERR_PTR(-EINVAL);
			__pool_inc(pool);

		} else {
			pool = pool_create(pool_md, metadata_dev, block_size, error);
			*created = 1;
		}
	}

	return pool;
}

/*----------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------*/
static void pool_dtr(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	unbind_control_target(pt->pool, ti);
	__pool_dec(pt->pool);
	dm_put_device(ti, pt->metadata_dev);
	dm_put_device(ti, pt->data_dev);
	kfree(pt);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
			       struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 3, "Invalid number of pool feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "skip_block_zeroing")) {
			pf->zero_new_blocks = 0;
			continue;
		} else if (!strcasecmp(arg_name, "ignore_discard")) {
			pf->discard_enabled = 0;
			continue;
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			pf->discard_passdown = 0;
			continue;
		}

		ti->error = "Unrecognised pool feature requested";
		r = -EINVAL;
	}

	return r;
}

/*
 * thin-pool <metadata dev> <data dev>
 *	     <data block size (sectors)>
 *	     <low water mark (blocks)>
 *	     [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 */
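/*
 * Example table line (device names and sizes are illustrative only):
 *
 *	dmsetup create pool --table \
 *	    "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing"
 *
 * This maps a 10GiB pool with 64KiB (128-sector) data blocks, a low
 * water mark of 32768 blocks and block zeroing disabled.
 */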
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r, pool_created = 0;
	struct pool_c *pt;
	struct pool *pool;
	struct pool_features pf;
	struct dm_arg_set as;
	struct dm_dev *data_dev;
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	/*
	 * FIXME Remove validation from scope of lock.
	 */
	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc < 4) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}
	as.argc = argc;
	as.argv = argv;

	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}

	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
		ti->error = "Error getting data device";
		goto out_metadata;
	}

	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    !is_power_of_2(block_size)) {
		ti->error = "Invalid block size";
		r = -EINVAL;
		goto out;
	}

	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
		ti->error = "Invalid low water mark";
		r = -EINVAL;
		goto out;
	}

	/*
	 * Set default pool features.
	 */
	pool_features_init(&pf);

	dm_consume_args(&as, 4);
	r = parse_pool_features(&as, &pf, ti);
	if (r)
		goto out;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
		goto out;
	}

	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
			   block_size, &ti->error, &pool_created);
	if (IS_ERR(pool)) {
		r = PTR_ERR(pool);
		goto out_free_pt;
	}

	/*
	 * 'pool_created' reflects whether this is the first table load.
	 * Top level discard support is not allowed to be changed after
	 * initial load.  This would require a pool reload to trigger thin
	 * device changes.
	 */
	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
		ti->error = "Discard support cannot be disabled once enabled";
		r = -EINVAL;
		goto out_flags_changed;
	}

	pt->pool = pool;
	pt->ti = ti;
	pt->metadata_dev = metadata_dev;
	pt->data_dev = data_dev;
	pt->low_water_blocks = low_water_blocks;
	pt->pf = pf;
	ti->num_flush_requests = 1;
	/*
	 * Only need to enable discards if the pool should pass
	 * them down to the data device.  The thin device's discard
	 * processing will cause mappings to be removed from the btree.
	 */
	if (pf.discard_enabled && pf.discard_passdown) {
		ti->num_discard_requests = 1;
		/*
		 * Setting 'discards_supported' circumvents the normal
		 * stacking of discard limits (this keeps the pool and
		 * thin devices' discard limits consistent).
		 */
		ti->discards_supported = 1;
		ti->discard_zeroes_data_unsupported = 1;
	}
	ti->private = pt;

	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

out_flags_changed:
	__pool_dec(pool);
out_free_pt:
	kfree(pt);
out:
	dm_put_device(ti, data_dev);
out_metadata:
	dm_put_device(ti, metadata_dev);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int pool_map(struct dm_target *ti, struct bio *bio,
		    union map_info *map_context)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * As this is a singleton target, ti->begin is always zero.
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio->bi_bdev = pt->data_dev->bdev;
	r = DM_MAPIO_REMAPPED;
	spin_unlock_irqrestore(&pool->lock, flags);

	return r;
}

/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.
 *
 * This both copes with opening preallocated data devices in the ctr
 * being followed by a resume
 * -and-
 * calling the resume method individually after userspace has
 * grown the data device in reaction to a table event.
 */
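/*
 * e.g. after userspace has grown the data device (commands and names
 * are illustrative):
 *
 *	dmsetup reload pool --table "0 <new length> thin-pool ..."
 *	dmsetup suspend pool
 *	dmsetup resume pool
 *
 * pool_preresume() then sees the larger ti->len and calls
 * dm_pool_resize_data_dev() to match.
 */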
static int pool_preresume(struct dm_target *ti)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	dm_block_t data_size, sb_data_size;

	/*
	 * Take control of the pool object.
	 */
	r = bind_control_target(pool, ti);
	if (r)
		return r;

	data_size = ti->len >> pool->block_shift;
	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
	if (r) {
		DMERR("failed to retrieve data device size");
		return r;
	}

	if (data_size < sb_data_size) {
		DMERR("pool target too small, is %llu blocks (expected %llu)",
		      data_size, sb_data_size);
		return -EINVAL;

	} else if (data_size > sb_data_size) {
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			DMERR("failed to resize data device");
			return r;
		}

		r = dm_pool_commit_metadata(pool->pmd);
		if (r) {
			DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
			      __func__, r);
			return r;
		}
	}

	return 0;
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = 0;
	pool->no_free_space = 0;
	__requeue_bios(pool);
	spin_unlock_irqrestore(&pool->lock, flags);

	do_waker(&pool->waker.work);
}

static void pool_postsuspend(struct dm_target *ti)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work(&pool->waker);
	flush_workqueue(pool->wq);

	r = dm_pool_commit_metadata(pool->pmd);
	if (r < 0) {
		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
		      __func__, r);
		/* FIXME: invalidate device? error the next FUA or FLUSH bio? */
	}
}

static int check_arg_count(unsigned argc, unsigned args_required)
{
	if (argc != args_required) {
		DMWARN("Message received with %u arguments instead of %u.",
		       argc, args_required);
		return -EINVAL;
	}

	return 0;
}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{
	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
	    *dev_id <= MAX_DEV_ID)
		return 0;

	if (warning)
		DMWARN("Message received with invalid device id: %s", arg);

	return -EINVAL;
}

static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

	return 0;
}

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

/*
 * Messages supported:
 *   create_thin	<dev_id>
 *   create_snap	<dev_id> <origin_id>
 *   delete		<dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 */
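/*
 * Example message sequence (pool name and device ids are illustrative):
 *
 *	dmsetup message /dev/mapper/pool 0 create_thin 0
 *	dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *
 * The first message creates thin device 0; the second snapshots it as
 * device 1.  Each successful message is followed by a metadata commit
 * in pool_message() below.
 */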
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r) {
		r = dm_pool_commit_metadata(pool->pmd);
		if (r)
			DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
			      argv[0], r);
	}

	return r;
}

/*
 * Status line is:
 *    <transaction id> <used metadata blocks>/<total metadata blocks>
 *    <used data blocks>/<total data blocks> <held metadata root>
 */
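/*
 * e.g. "1 24/4096 2048/409600 -" (values illustrative): transaction id 1,
 * 24 of 4096 metadata blocks used, 2048 of 409600 data blocks used and
 * no held metadata root.
 */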
static void pool_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
{
	int r, count;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("dm_pool_get_metadata_dev_size returned %d", r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("dm_pool_get_free_block_count returned %d", r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("dm_pool_get_data_dev_size returned %d", r);
			goto err;
		}

		r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
		if (r) {
			DMERR("dm_pool_get_held_metadata_root returned %d", r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu", held_root);
		else
			DMEMIT("-");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);

		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
			!pt->pf.discard_passdown;
		DMEMIT("%u ", count);

		if (!pool->pf.zero_new_blocks)
			DMEMIT("skip_block_zeroing ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard ");

		if (!pt->pf.discard_passdown)
			DMEMIT("no_discard_passdown ");

		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct pool_c *pt = ti->private;
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = pt->data_dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the pool's data device
	 */
	limits->max_discard_sectors = pool->sectors_per_block;

	/*
	 * This is just a hint, and not enforced.  We have to cope with
	 * bios that overlap 2 blocks.
	 */
	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	blk_limits_io_min(limits, 0);
	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	if (pool->pf.discard_enabled)
		set_discard_limits(pool, limits);
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.merge = pool_merge,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_thin_open;
	}

	ti->split_io = tc->pool->sectors_per_block;
	ti->num_flush_requests = 1;

	/* In case the pool supports discards, pass them on. */
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = 1;
		ti->num_discard_requests = 1;
		ti->discard_zeroes_data_unsupported = 1;
	}

	dm_put(pool_md);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

bad_thin_open:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio,
		    union map_info *map_context)
{
	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);

	return thin_bio_map(ti, bio, map_context);
}

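/*
 * Release any deferred set entries held by this bio's endio hook:
 * shared-read mappings that have now quiesced may be processed, and
 * discards whose io has drained are queued for the worker.
 */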
static int thin_endio(struct dm_target *ti,
		      struct bio *bio, int err,
		      union map_info *map_context)
{
	unsigned long flags;
	struct endio_hook *h = map_context->ptr;
	struct list_head work;
	struct new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		ds_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			m->quiesced = 1;
			__maybe_add_mapping(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		ds_dec(h->all_io_entry, &work);
		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list)
			list_add(&m->list, &pool->prepared_discards);
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	mempool_free(h, pool->endio_hook_pool);

	return 0;
}

static void thin_postsuspend(struct dm_target *ti)
{
	if (dm_noflush_suspending(ti))
		requeue_io((struct thin_c *)ti->private);
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
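/*
 * e.g. "131072 262143" (values illustrative): 131072 sectors mapped,
 * highest mapped sector 262143.  A device with no mapped blocks
 * reports "0 -".
 */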
static void thin_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	dm_block_t blocks;
	struct thin_c *tc = ti->private;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!tc->pool->ti)
		return 0;	/* nothing is bound */

	blocks = tc->pool->ti->len >> tc->pool->block_shift;
	if (blocks)
		return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);

	return 0;
}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	blk_limits_io_min(limits, 0);
	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	set_discard_limits(pool, limits);
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
	.io_hints = thin_io_hints,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");