/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
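
/*
 * With SECTOR_SHIFT == 9 these evaluate to 128 sectors (64KB) and
 * 2097152 sectors (1GB) respectively.
 */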

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
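
/*
 * A concrete example with illustrative numbers: suppose the origin and a
 * snapshot both map virtual block 7 to data block 100.  A write to block
 * 7 of the origin allocates a fresh data block, say 101, copies 100 to
 * 101 (skipped if the write covers the whole block) and inserts the
 * mapping 7 -> 101 into the origin's btree.  The snapshot's btree still
 * maps 7 -> 100, so it continues to see the old data.
 */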

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};
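
/*
 * The worker takes the write side of the lock once it has been running
 * for longer than THROTTLE_THRESHOLD; I/O submitters take the read side
 * via throttle_lock(), so they are held off until the worker finishes
 * its pass and releases the write lock in throttle_work_complete().
 */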

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in four modes, ordered from least to most degraded so
 * that the modes can be compared with ordinary comparison operators.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
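
/*
 * The ordering lets a single comparison cover several modes, e.g.
 * "get_pool_mode(pool) >= PM_READ_ONLY" catches both the read-only
 * and fail modes (see commit()).
 */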

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

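/*
 * A bio prison cell represents exclusive access to a region, keyed by
 * (virtual, dev, block).  The first bio to be detained against a key
 * becomes the cell's holder; subsequent bios for the same key are
 * parked in the cell until it is released.
 */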
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}
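
/*
 * dm_cell_error() completes every bio held in the cell with the given
 * code, so passing 0 or DM_ENDIO_REQUEUE lets the same helper be used
 * to succeed or requeue a cell as well as to fail it.
 */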

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
	requeue_deferred_cells(tc);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}
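
/*
 * For example, with a 64KB block size sectors_per_block is 128 and
 * sectors_per_block_shift is 7, so a bio whose bi_sector is 300 maps
 * to virtual block 300 >> 7 == 2.
 */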

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}
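
/*
 * FLUSH/FUA bios must not complete until any metadata changed during
 * the current transaction has been committed, so such bios are batched
 * up on deferred_flush_bios and issued after a single commit covering
 * all of them (see issue() and process_deferred_bios()).
 */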

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard) {
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	} else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}
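
/*
 * Mappings are reserved in two steps: ensure_next_mapping() attempts to
 * preallocate one (and may fail with -ENOMEM, letting the caller defer
 * the work), after which get_next_mapping() is guaranteed to succeed.
 */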

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_block,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_block);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_block, m);

	else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

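/*
 * An external origin may be smaller than the thin device, so a block
 * may be copied in full, copied partially with the tail zeroed, or
 * simply zeroed when it lies entirely beyond the end of the origin.
 */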
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}
1269
Joe Thornbera374bb22014-10-10 13:43:14 +01001270static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
Joe Thornber104655f2012-03-28 18:41:28 +01001271{
1272 int r;
Joe Thornbera374bb22014-10-10 13:43:14 +01001273 struct bio *bio = cell->holder;
Joe Thornber104655f2012-03-28 18:41:28 +01001274 struct pool *pool = tc->pool;
Joe Thornbera374bb22014-10-10 13:43:14 +01001275 struct dm_bio_prison_cell *cell2;
1276 struct dm_cell_key key2;
Joe Thornber104655f2012-03-28 18:41:28 +01001277 dm_block_t block = get_bio_block(tc, bio);
1278 struct dm_thin_lookup_result lookup_result;
Mike Snitzera24c2562012-06-03 00:30:00 +01001279 struct dm_thin_new_mapping *m;
Joe Thornber104655f2012-03-28 18:41:28 +01001280
Joe Thornbera374bb22014-10-10 13:43:14 +01001281 if (tc->requeue_mode) {
1282 cell_requeue(pool, cell);
Joe Thornber104655f2012-03-28 18:41:28 +01001283 return;
Joe Thornbera374bb22014-10-10 13:43:14 +01001284 }
Joe Thornber104655f2012-03-28 18:41:28 +01001285
1286 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1287 switch (r) {
1288 case 0:
1289 /*
1290 * Check nobody is fiddling with this pool block. This can
1291 * happen if someone's in the process of breaking sharing
1292 * on this block.
1293 */
1294 build_data_key(tc->td, lookup_result.block, &key2);
Joe Thornber6beca5e2013-03-01 22:45:50 +00001295 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
Joe Thornberf286ba02012-12-21 20:23:33 +00001296 cell_defer_no_holder(tc, cell);
Joe Thornber104655f2012-03-28 18:41:28 +01001297 break;
1298 }
1299
1300 if (io_overlaps_block(pool, bio)) {
1301 /*
1302 * IO may still be going to the destination block. We must
1303 * quiesce before we can do the removal.
1304 */
1305 m = get_next_mapping(pool);
1306 m->tc = tc;
Joe Thornber19fa1a62013-12-17 12:09:40 -05001307 m->pass_discard = pool->pf.discard_passdown;
1308 m->definitely_not_shared = !lookup_result.shared;
Joe Thornber104655f2012-03-28 18:41:28 +01001309 m->virt_block = block;
1310 m->data_block = lookup_result.block;
1311 m->cell = cell;
1312 m->cell2 = cell2;
Joe Thornber104655f2012-03-28 18:41:28 +01001313 m->bio = bio;
1314
Joe Thornber7a7e97c2014-09-12 11:34:01 +01001315 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1316 pool->process_prepared_discard(m);
1317
Joe Thornber104655f2012-03-28 18:41:28 +01001318 } else {
Joe Thornbere8088072012-12-21 20:23:31 +00001319 inc_all_io_entry(pool, bio);
Joe Thornberf286ba02012-12-21 20:23:33 +00001320 cell_defer_no_holder(tc, cell);
1321 cell_defer_no_holder(tc, cell2);
Joe Thornbere8088072012-12-21 20:23:31 +00001322
Joe Thornber104655f2012-03-28 18:41:28 +01001323 /*
Mikulas Patocka49296302012-07-27 15:08:03 +01001324 * The DM core makes sure that the discard doesn't span
1325 * a block boundary. So we submit the discard of a
1326 * partial block appropriately.
Joe Thornber104655f2012-03-28 18:41:28 +01001327 */
Mikulas Patocka650d2a02012-07-20 14:25:05 +01001328 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1329 remap_and_issue(tc, bio, lookup_result.block);
1330 else
1331 bio_endio(bio, 0);
Joe Thornber104655f2012-03-28 18:41:28 +01001332 }
1333 break;
1334
1335 case -ENODATA:
1336 /*
1337 * It isn't provisioned, just forget it.
1338 */
Joe Thornberf286ba02012-12-21 20:23:33 +00001339 cell_defer_no_holder(tc, cell);
Joe Thornber104655f2012-03-28 18:41:28 +01001340 bio_endio(bio, 0);
1341 break;
1342
1343 default:
Mike Snitzerc3977412012-12-21 20:23:34 +00001344 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1345 __func__, r);
Joe Thornberf286ba02012-12-21 20:23:33 +00001346 cell_defer_no_holder(tc, cell);
Joe Thornber104655f2012-03-28 18:41:28 +01001347 bio_io_error(bio);
1348 break;
1349 }
1350}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	dm_block_t block = get_bio_block(tc, bio);

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	process_discard_cell(tc, cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct pool *pool = tc->pool;
	struct bio *bio = cell->holder;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result, cell);
		else {
			inc_all_io_entry(pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			if (bio_end_sector(bio) <= tc->origin_size)
				remap_to_origin_and_issue(tc, bio);

			else if (bio->bi_iter.bi_sector < tc->origin_size) {
				zero_fill_bio(bio);
				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
				remap_to_origin_and_issue(tc, bio);

			} else {
				zero_fill_bio(bio);
				bio_endio(bio, 0);
			}
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}
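
/*
 * Illustrative note (not driver code): the -ENODATA branch above handles
 * three cases for a read against an external origin.  Assuming
 * tc->origin_size == 100 sectors:
 *
 *   a bio covering sectors  88..95  lies entirely inside the origin and is
 *   remapped to it verbatim;
 *
 *   a bio covering sectors  96..103 straddles the end of the origin: the
 *   whole bio is zeroed, bi_size is trimmed to the 4 in-range sectors, and
 *   only that prefix is read from the origin (the tail stays zeroed);
 *
 *   a bio covering sectors 104..111 lies entirely beyond the origin, so it
 *   is zero-filled and completed immediately.
 */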

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	process_cell(tc, cell);
}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
			handle_unserviceable_bio(tc->pool, bio);
			if (cell)
				cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			if (cell)
				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (cell)
			cell_defer_no_holder(tc, cell);
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		if (cell)
			cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	__process_bio_read_only(tc, bio, NULL);
}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	__process_bio_read_only(tc, cell->holder, cell);
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio, 0);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_success(tc->pool, cell);
}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_error(tc->pool, cell);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
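
/*
 * A sketch of the check above (illustrative only): with COMMIT_PERIOD == HZ
 * this returns true roughly once a second.  The first comparison
 * (jiffies < last_commit_jiffies) also forces a commit after the jiffies
 * counter wraps; without it a wrapped counter could keep the second
 * comparison false for a very long time and starve the periodic commit.
 */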

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}
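
/*
 * Illustrative note: deferred bios arrive in submission order, e.g. at
 * sectors 1024, 8, 4096, 16.  Re-issuing them in that order would make a
 * spinning data device seek back and forth; after the rb-tree pass above
 * they drain as 8, 16, 1024, 4096, so the device sees a mostly sequential
 * stream.  An in-order rb-tree walk yields ascending sector order for free.
 */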

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		requeue_bio_list(tc, &tc->deferred_bio_list);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}

static int cmp_cells(const void *lhs, const void *rhs)
{
	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);

	BUG_ON(!lhs_cell->holder);
	BUG_ON(!rhs_cell->holder);

	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
		return -1;

	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
		return 1;

	return 0;
}

static unsigned sort_cells(struct pool *pool, struct list_head *cells)
{
	unsigned count = 0;
	struct dm_bio_prison_cell *cell, *tmp;

	list_for_each_entry_safe(cell, tmp, cells, user_list) {
		if (count >= CELL_SORT_ARRAY_SIZE)
			break;

		pool->cell_sort_array[count++] = cell;
		list_del(&cell->user_list);
	}

	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);

	return count;
}
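
/*
 * Note on the sort() call above: the array holds cell *pointers*, so the
 * element size is sizeof(cell) (a pointer, not a struct), and cmp_cells()
 * is handed a pointer to each array slot -- hence the extra level of
 * indirection when it dereferences lhs/rhs.  Cells beyond
 * CELL_SORT_ARRAY_SIZE simply stay on the list for the next batch.
 */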

static void process_thin_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell;
	unsigned i, j, count;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	if (list_empty(&cells))
		return;

	do {
		count = sort_cells(tc->pool, &cells);

		for (i = 0; i < count; i++) {
			cell = pool->cell_sort_array[i];
			BUG_ON(!cell->holder);

			/*
			 * If we've got no free new_mapping structs, and processing
			 * this bio might require one, we pause until there are some
			 * prepared mappings to process.
			 */
			if (ensure_next_mapping(pool)) {
				for (j = i; j < count; j++)
					list_add(&pool->cell_sort_array[j]->user_list, &cells);

				spin_lock_irqsave(&tc->lock, flags);
				list_splice(&cells, &tc->deferred_cells);
				spin_unlock_irqrestore(&tc->lock, flags);
				return;
			}

			if (cell->holder->bi_rw & REQ_DISCARD)
				pool->process_discard_cell(tc, cell);
			else
				pool->process_cell(tc, cell);
		}
	} while (!list_empty(&cells));
}

static void thin_get(struct thin_c *tc);
static void thin_put(struct thin_c *tc);

/*
 * We can't hold rcu_read_lock() around code that can block. So we
 * find a thin with the rcu lock held; bump a refcount; then drop
 * the lock.
 */
static struct thin_c *get_first_thin(struct pool *pool)
{
	struct thin_c *tc = NULL;

	rcu_read_lock();
	if (!list_empty(&pool->active_thins)) {
		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
		thin_get(tc);
	}
	rcu_read_unlock();

	return tc;
}

static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
{
	struct thin_c *old_tc = tc;

	rcu_read_lock();
	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
		thin_get(tc);
		thin_put(old_tc);
		rcu_read_unlock();
		return tc;
	}
	thin_put(old_tc);
	rcu_read_unlock();

	return NULL;
}
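
/*
 * A minimal usage sketch (illustrative): callers iterate all active thins
 * without holding the RCU read lock across blocking work, e.g.
 *
 *	struct thin_c *tc = get_first_thin(pool);
 *	while (tc) {
 *		do_blocking_work(tc);		// hypothetical helper
 *		tc = get_next_thin(pool, tc);	// drops the ref on tc
 *	}
 *
 * The reference taken under rcu_read_lock() keeps each thin alive between
 * iterations; process_deferred_bios() below follows exactly this pattern.
 */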

static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct thin_c *tc;

	tc = get_first_thin(pool);
	while (tc) {
		process_thin_deferred_cells(tc);
		process_thin_deferred_bios(tc);
		tc = get_next_thin(pool, tc);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) &&
	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
		return;

	if (commit(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	throttle_work_start(&pool->throttle);
	dm_pool_issue_prefetches(pool->pmd);
	throttle_work_update(&pool->throttle);
	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	throttle_work_update(&pool->throttle);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	throttle_work_update(&pool->throttle);
	process_deferred_bios(pool);
	throttle_work_complete(&pool->throttle);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*
 * We're holding onto IO to allow userland time to react. After the
 * timeout either the pool will have been resized (and thus back in
 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
 */
static void do_no_space_timeout(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
					 no_space_timeout);

	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
		set_pool_mode(pool, PM_READ_ONLY);
}

/*----------------------------------------------------------------*/

struct pool_work {
	struct work_struct worker;
	struct completion complete;
};

static struct pool_work *to_pool_work(struct work_struct *ws)
{
	return container_of(ws, struct pool_work, worker);
}

static void pool_work_complete(struct pool_work *pw)
{
	complete(&pw->complete);
}

static void pool_work_wait(struct pool_work *pw, struct pool *pool,
			   void (*fn)(struct work_struct *))
{
	INIT_WORK_ONSTACK(&pw->worker, fn);
	init_completion(&pw->complete);
	queue_work(pool->wq, &pw->worker);
	wait_for_completion(&pw->complete);
}

/*----------------------------------------------------------------*/

struct noflush_work {
	struct pool_work pw;
	struct thin_c *tc;
};

static struct noflush_work *to_noflush(struct work_struct *ws)
{
	return container_of(to_pool_work(ws), struct noflush_work, pw);
}

static void do_noflush_start(struct work_struct *ws)
{
	struct noflush_work *w = to_noflush(ws);
	w->tc->requeue_mode = true;
	requeue_io(w->tc);
	pool_work_complete(&w->pw);
}

static void do_noflush_stop(struct work_struct *ws)
{
	struct noflush_work *w = to_noflush(ws);
	w->tc->requeue_mode = false;
	pool_work_complete(&w->pw);
}

static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{
	struct noflush_work w;

	w.tc = tc;
	pool_work_wait(&w.pw, tc->pool, fn);
}
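
/*
 * Usage sketch (illustrative): suspend/resume paths elsewhere in this
 * target flip requeue_mode from the pool workqueue so the change cannot
 * race with the worker, e.g.
 *
 *	noflush_work(tc, do_noflush_start);	// enter requeue mode
 *	...device suspended...
 *	noflush_work(tc, do_noflush_stop);	// leave requeue mode
 *
 * pool_work_wait() blocks the caller until the function has run on the
 * pool's ordered workqueue, so the transition is fully synchronous.
 */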

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
	return pool->pf.mode;
}

static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
{
	dm_table_event(pool->ti->table);
	DMINFO("%s: switching pool to %s mode",
	       dm_device_name(pool->pool_md), new_mode);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
	struct pool_c *pt = pool->ti->private;
	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
	enum pool_mode old_mode = get_pool_mode(pool);
	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;

	/*
	 * Never allow the pool to transition to PM_WRITE mode if user
	 * intervention is required to verify metadata and data consistency.
	 */
	if (new_mode == PM_WRITE && needs_check) {
		DMERR("%s: unable to switch pool to write mode until repaired.",
		      dm_device_name(pool->pool_md));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = PM_READ_ONLY;
	}
	/*
	 * If we were in PM_FAIL mode, rollback of metadata failed. We're
	 * not going to recover without a thin_repair. So we never let the
	 * pool move out of the old mode.
	 */
	if (old_mode == PM_FAIL)
		new_mode = old_mode;

	switch (new_mode) {
	case PM_FAIL:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "failure");
		dm_pool_metadata_read_only(pool->pmd);
		pool->process_bio = process_bio_fail;
		pool->process_discard = process_bio_fail;
		pool->process_cell = process_cell_fail;
		pool->process_discard_cell = process_cell_fail;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_fail;

		error_retry_list(pool);
		break;

	case PM_READ_ONLY:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "read-only");
		dm_pool_metadata_read_only(pool->pmd);
		pool->process_bio = process_bio_read_only;
		pool->process_discard = process_bio_success;
		pool->process_cell = process_cell_read_only;
		pool->process_discard_cell = process_cell_success;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_passdown;

		error_retry_list(pool);
		break;

	case PM_OUT_OF_DATA_SPACE:
		/*
		 * Ideally we'd never hit this state; the low water mark
		 * would trigger userland to extend the pool before we
		 * completely run out of data space. However, many small
		 * IOs to unprovisioned space can consume data space at an
		 * alarming rate. Adjust your low water mark if you're
		 * frequently seeing this mode.
		 */
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "out-of-data-space");
		pool->process_bio = process_bio_read_only;
		pool->process_discard = process_discard_bio;
		pool->process_cell = process_cell_read_only;
		pool->process_discard_cell = process_discard_cell;
		pool->process_prepared_mapping = process_prepared_mapping;
		pool->process_prepared_discard = process_prepared_discard_passdown;

		if (!pool->pf.error_if_no_space && no_space_timeout)
			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
		break;

	case PM_WRITE:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "write");
		dm_pool_metadata_read_write(pool->pmd);
		pool->process_bio = process_bio;
		pool->process_discard = process_discard_bio;
		pool->process_cell = process_cell;
		pool->process_discard_cell = process_discard_cell;
		pool->process_prepared_mapping = process_prepared_mapping;
		pool->process_prepared_discard = process_prepared_discard;
		break;
	}

	pool->pf.mode = new_mode;
	/*
	 * The pool mode may have changed, sync it so bind_control_target()
	 * doesn't cause an unexpected mode transition on resume.
	 */
	pt->adjusted_pf.mode = new_mode;
}
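
/*
 * Summary of the handler table set up above (descriptive only):
 *
 *	mode			bios		discards	prepared discards
 *	PM_FAIL			error		error		error
 *	PM_READ_ONLY		read only	succeed		passdown
 *	PM_OUT_OF_DATA_SPACE	read only	normal		passdown
 *	PM_WRITE		normal		normal		normal
 *
 * Writes that would need a new block in the read-only modes go through
 * handle_unserviceable_bio() rather than erroring outright.
 */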

static void abort_transaction(struct pool *pool)
{
	const char *dev_name = dm_device_name(pool->pool_md);

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_pool_abort_metadata(pool->pmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_pool_mode(pool, PM_FAIL);
	}

	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_pool_mode(pool, PM_FAIL);
	}
}

static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    dm_device_name(pool->pool_md), op, r);

	abort_transaction(pool);
	set_pool_mode(pool, PM_READ_ONLY);
}
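
/*
 * Usage sketch (illustrative): metadata calls are wrapped so a failure
 * downgrades the pool in one place, e.g.
 *
 *	r = dm_pool_commit_metadata(pool->pmd);
 *	if (r)
 *		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
 *
 * This aborts the open transaction, sets the needs_check flag and drops
 * the pool to read-only mode rather than carrying on with stale metadata.
 */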

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->deferred_bio_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;

	throttle_lock(&pool->throttle);
	thin_defer_bio(tc, bio);
	throttle_unlock(&pool->throttle);
}

static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	throttle_lock(&pool->throttle);
	spin_lock_irqsave(&tc->lock, flags);
	list_add_tail(&cell->user_list, &tc->deferred_cells);
	spin_unlock_irqrestore(&tc->lock, flags);
	throttle_unlock(&pool->throttle);

	wake_worker(pool);
}

static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->tc = tc;
	h->shared_read_entry = NULL;
	h->all_io_entry = NULL;
	h->overwrite_mapping = NULL;
}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct thin_c *tc = ti->private;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_device *td = tc->td;
	struct dm_thin_lookup_result result;
	struct dm_bio_prison_cell *virt_cell, *data_cell;
	struct dm_cell_key key;

	thin_hook_bio(tc, bio);

	if (tc->requeue_mode) {
		bio_endio(bio, DM_ENDIO_REQUEUE);
		return DM_MAPIO_SUBMITTED;
	}

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
		thin_defer_bio_with_throttle(tc, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * We must hold the virtual cell before doing the lookup, otherwise
	 * there's a race with discard.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &virt_cell))
		return DM_MAPIO_SUBMITTED;

	r = dm_thin_find_block(td, block, 0, &result);

	/*
	 * Note that we defer readahead too.
	 */
	switch (r) {
	case 0:
		if (unlikely(result.shared)) {
			/*
			 * We have a race condition here between the
			 * result.shared value returned by the lookup and
			 * snapshot creation, which may cause new
			 * sharing.
			 *
			 * To avoid this always quiesce the origin before
			 * taking the snap. You want to do this anyway to
			 * ensure a consistent application view
			 * (i.e. lockfs).
			 *
			 * More distant ancestors are irrelevant. The
			 * shared flag will be set in their case.
			 */
			thin_defer_cell(tc, virt_cell);
			return DM_MAPIO_SUBMITTED;
		}

		build_data_key(tc->td, result.block, &key);
		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
			cell_defer_no_holder(tc, virt_cell);
			return DM_MAPIO_SUBMITTED;
		}

		inc_all_io_entry(tc->pool, bio);
		cell_defer_no_holder(tc, data_cell);
		cell_defer_no_holder(tc, virt_cell);

		remap(tc, bio, result.block);
		return DM_MAPIO_REMAPPED;

	case -ENODATA:
		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
			/*
			 * This block isn't provisioned, and we have no way
			 * of doing so.
			 */
			handle_unserviceable_bio(tc->pool, bio);
			cell_defer_no_holder(tc, virt_cell);
			return DM_MAPIO_SUBMITTED;
		}
		/* fall through */

	case -EWOULDBLOCK:
		thin_defer_cell(tc, virt_cell);
		return DM_MAPIO_SUBMITTED;

	default:
		/*
		 * Must always call bio_io_error on failure.
		 * dm_thin_find_block can fail with -EINVAL if the
		 * pool is switched to fail-io mode.
		 */
		bio_io_error(bio);
		cell_defer_no_holder(tc, virt_cell);
		return DM_MAPIO_SUBMITTED;
	}
}
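
/*
 * Illustrative note on the return values above: DM_MAPIO_REMAPPED tells
 * the DM core that the bio has been redirected at the data device and
 * should now be submitted, while DM_MAPIO_SUBMITTED means this target has
 * taken ownership of the bio (deferred it to the worker, requeued it, or
 * completed it) and the core must not submit it.
 */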

static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
	struct request_queue *q;

	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
		return 1;

	q = bdev_get_queue(pt->data_dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static void requeue_bios(struct pool *pool)
{
	unsigned long flags;
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
		spin_lock_irqsave(&tc->lock, flags);
		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
		bio_list_init(&tc->retry_on_resume_list);
		spin_unlock_irqrestore(&tc->lock, flags);
	}
	rcu_read_unlock();
}

/*----------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------*/
static bool data_dev_supports_discard(struct pool_c *pt)
{
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	return q && blk_queue_discard(q);
}

static bool is_factor(sector_t block_size, uint32_t n)
{
	return !sector_div(block_size, n);
}
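
/*
 * Worked example (illustrative): sector_div() divides in place and returns
 * the remainder, so is_factor(1024, 128) computes 1024 % 128 == 0 and
 * returns true, while is_factor(1024, 96) returns false.  The division is
 * done on the by-value parameter, so the caller's block_size is untouched.
 */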

/*
 * If discard_passdown was enabled verify that the data device
 * supports discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct pool_c *pt)
{
	struct pool *pool = pt->pool;
	struct block_device *data_bdev = pt->data_dev->bdev;
	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!pt->adjusted_pf.discard_passdown)
		return;

	if (!data_dev_supports_discard(pt))
		reason = "discard unsupported";

	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
		reason = "max discard sectors smaller than a block";

	else if (data_limits->discard_granularity > block_size)
		reason = "discard granularity larger than a block";

	else if (!is_factor(block_size, data_limits->discard_granularity))
		reason = "discard granularity not a factor of block size";

	if (reason) {
		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
		pt->adjusted_pf.discard_passdown = false;
	}
}
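
/*
 * Worked example (illustrative, assuming a 512KiB pool block, i.e.
 * block_size == 1024 sectors == 524288 bytes): a data device advertising a
 * 64KiB discard_granularity and max_discard_sectors >= 1024 passes all
 * four checks, so passdown stays enabled; one advertising a 768KiB
 * granularity fails the "larger than a block" check and passdown is
 * quietly disabled with a warning.
 */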

static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	/*
	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
	 */
	enum pool_mode old_mode = get_pool_mode(pool);
	enum pool_mode new_mode = pt->adjusted_pf.mode;

	/*
	 * Don't change the pool's mode until set_pool_mode() below.
	 * Otherwise the pool's process_* function pointers may
	 * not match the desired pool mode.
	 */
	pt->adjusted_pf.mode = old_mode;

	pool->ti = ti;
	pool->pf = pt->adjusted_pf;
	pool->low_water_blocks = pt->low_water_blocks;

	set_pool_mode(pool, new_mode);

	return 0;
}

static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{
	if (pool->ti == ti)
		pool->ti = NULL;
}

/*----------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------*/
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
	pf->mode = PM_WRITE;
	pf->zero_new_blocks = true;
	pf->discard_enabled = true;
	pf->discard_passdown = true;
	pf->error_if_no_space = false;
}

static void __pool_destroy(struct pool *pool)
{
	__pool_table_remove(pool);

	if (dm_pool_metadata_close(pool->pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	dm_bio_prison_destroy(pool->prison);
	dm_kcopyd_client_destroy(pool->copier);

	if (pool->wq)
		destroy_workqueue(pool->wq);

	if (pool->next_mapping)
		mempool_free(pool->next_mapping, pool->mapping_pool);
	mempool_destroy(pool->mapping_pool);
	dm_deferred_set_destroy(pool->shared_read_ds);
	dm_deferred_set_destroy(pool->all_io_ds);
	kfree(pool);
}

static struct kmem_cache *_new_mapping_cache;

static struct pool *pool_create(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size,
				int read_only, char **error)
{
	int r;
	void *err_p;
	struct pool *pool;
	struct dm_pool_metadata *pmd;
	bool format_device = read_only ? false : true;

	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
	if (IS_ERR(pmd)) {
		*error = "Error creating metadata object";
		return (struct pool *)pmd;
	}

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		*error = "Error allocating memory for pool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_pool;
	}

	pool->pmd = pmd;
	pool->sectors_per_block = block_size;
	if (block_size & (block_size - 1))
		pool->sectors_per_block_shift = -1;
	else
		pool->sectors_per_block_shift = __ffs(block_size);
	pool->low_water_blocks = 0;
	pool_features_init(&pool->pf);
	pool->prison = dm_bio_prison_create();
	if (!pool->prison) {
		*error = "Error creating pool's bio prison";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_prison;
	}

	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(pool->copier)) {
		r = PTR_ERR(pool->copier);
		*error = "Error creating pool's kcopyd client";
		err_p = ERR_PTR(r);
		goto bad_kcopyd_client;
	}

	/*
	 * Create singlethreaded workqueue that will service all devices
	 * that use this metadata.
	 */
	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!pool->wq) {
		*error = "Error creating pool's workqueue";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_wq;
	}

	throttle_init(&pool->throttle);
	INIT_WORK(&pool->worker, do_worker);
	INIT_DELAYED_WORK(&pool->waker, do_waker);
	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
	spin_lock_init(&pool->lock);
	bio_list_init(&pool->deferred_flush_bios);
	INIT_LIST_HEAD(&pool->prepared_mappings);
	INIT_LIST_HEAD(&pool->prepared_discards);
	INIT_LIST_HEAD(&pool->active_thins);
	pool->low_water_triggered = false;

	pool->shared_read_ds = dm_deferred_set_create();
	if (!pool->shared_read_ds) {
		*error = "Error creating pool's shared read deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_shared_read_ds;
	}

	pool->all_io_ds = dm_deferred_set_create();
	if (!pool->all_io_ds) {
		*error = "Error creating pool's all io deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_all_io_ds;
	}

	pool->next_mapping = NULL;
	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
						      _new_mapping_cache);
	if (!pool->mapping_pool) {
		*error = "Error creating pool's mapping mempool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_mapping_pool;
	}

	pool->ref_count = 1;
	pool->last_commit_jiffies = jiffies;
	pool->pool_md = pool_md;
	pool->md_dev = metadata_dev;
	__pool_table_insert(pool);

	return pool;

bad_mapping_pool:
	dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
	dm_deferred_set_destroy(pool->shared_read_ds);
bad_shared_read_ds:
	destroy_workqueue(pool->wq);
bad_wq:
	dm_kcopyd_client_destroy(pool->copier);
bad_kcopyd_client:
	dm_bio_prison_destroy(pool->prison);
bad_prison:
	kfree(pool);
bad_pool:
	if (dm_pool_metadata_close(pmd))
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	return err_p;
}

static void __pool_inc(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	pool->ref_count++;
}

static void __pool_dec(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	BUG_ON(!pool->ref_count);
	if (!--pool->ref_count)
		__pool_destroy(pool);
}

static struct pool *__pool_find(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size, int read_only,
				char **error, int *created)
{
	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);

	if (pool) {
		if (pool->pool_md != pool_md) {
			*error = "metadata device already in use by a pool";
			return ERR_PTR(-EBUSY);
		}
		__pool_inc(pool);

	} else {
		pool = __pool_table_lookup(pool_md);
		if (pool) {
			if (pool->md_dev != metadata_dev) {
				*error = "different pool cannot replace a pool";
				return ERR_PTR(-EINVAL);
			}
			__pool_inc(pool);

		} else {
			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
			*created = 1;
		}
	}

	return pool;
}

/*----------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------*/
static void pool_dtr(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	unbind_control_target(pt->pool, ti);
	__pool_dec(pt->pool);
	dm_put_device(ti, pt->metadata_dev);
	dm_put_device(ti, pt->data_dev);
	kfree(pt);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
			       struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 4, "Invalid number of pool feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "skip_block_zeroing"))
			pf->zero_new_blocks = false;

		else if (!strcasecmp(arg_name, "ignore_discard"))
			pf->discard_enabled = false;

		else if (!strcasecmp(arg_name, "no_discard_passdown"))
			pf->discard_passdown = false;

		else if (!strcasecmp(arg_name, "read_only"))
			pf->mode = PM_READ_ONLY;

		else if (!strcasecmp(arg_name, "error_if_no_space"))
			pf->error_if_no_space = true;

		else {
			ti->error = "Unrecognised pool feature requested";
			r = -EINVAL;
			break;
		}
	}

	return r;
}
2737
Joe Thornberac8c3f32013-05-10 14:37:21 +01002738static void metadata_low_callback(void *context)
2739{
2740 struct pool *pool = context;
2741
2742 DMWARN("%s: reached low water mark for metadata device: sending event.",
2743 dm_device_name(pool->pool_md));
2744
2745 dm_table_event(pool->ti->table);
2746}
2747
Mike Snitzer7d489352014-02-12 23:58:15 -05002748static sector_t get_dev_size(struct block_device *bdev)
Joe Thornberb17446d2013-05-10 14:37:18 +01002749{
Mike Snitzer7d489352014-02-12 23:58:15 -05002750 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2751}
2752
2753static void warn_if_metadata_device_too_big(struct block_device *bdev)
2754{
2755 sector_t metadata_dev_size = get_dev_size(bdev);
Joe Thornberb17446d2013-05-10 14:37:18 +01002756 char buffer[BDEVNAME_SIZE];
2757
Mike Snitzer7d489352014-02-12 23:58:15 -05002758 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
Joe Thornberb17446d2013-05-10 14:37:18 +01002759 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2760 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
Mike Snitzer7d489352014-02-12 23:58:15 -05002761}
2762
2763static sector_t get_metadata_dev_size(struct block_device *bdev)
2764{
2765 sector_t metadata_dev_size = get_dev_size(bdev);
2766
2767 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2768 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
Joe Thornberb17446d2013-05-10 14:37:18 +01002769
2770 return metadata_dev_size;
2771}
2772
Joe Thornber24347e92013-05-10 14:37:19 +01002773static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2774{
2775 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2776
Mike Snitzer7d489352014-02-12 23:58:15 -05002777 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
Joe Thornber24347e92013-05-10 14:37:19 +01002778
2779 return metadata_dev_size;
2780}
2781
Joe Thornber991d9fa2011-10-31 20:21:18 +00002782/*
Joe Thornberac8c3f32013-05-10 14:37:21 +01002783 * When a metadata threshold is crossed a dm event is triggered, and
2784 * userland should respond by growing the metadata device. We could let
2785 * userland set the threshold, like we do with the data threshold, but I'm
2786 * not sure they know enough to do this well.
2787 */
2788static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2789{
2790 /*
2791 * 4M is ample for all ops with the possible exception of thin
2792 * device deletion which is harmless if it fails (just retry the
2793 * delete after you've grown the device).
2794 */
2795 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2796 return min((dm_block_t)1024ULL /* 4M */, quarter);
2797}
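
/*
 * Illustrative arithmetic (not from the original source): a 16GiB
 * metadata device holds 4194304 4KiB blocks, so the quarter above is
 * 1048576 blocks and the threshold clamps to the 1024-block (4MiB)
 * ceiling.
 */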

/*
 * thin-pool <metadata dev> <data dev>
 *	      <data block size (sectors)>
 *	      <low water mark (blocks)>
 *	      [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 *	     read_only: Don't allow any changes to be made to the pool metadata.
 *	     error_if_no_space: error IOs, instead of queueing, if no space.
 */
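/*
 * Example table line, adapted from
 * Documentation/device-mapper/thin-provisioning.txt (device names are
 * illustrative): a 10GiB pool with 64KiB (128-sector) blocks and a low
 * water mark of 32768 blocks:
 *
 *   dmsetup create pool \
 *	--table "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768"
 */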
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r, pool_created = 0;
	struct pool_c *pt;
	struct pool *pool;
	struct pool_features pf;
	struct dm_arg_set as;
	struct dm_dev *data_dev;
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
	fmode_t metadata_mode;

	/*
	 * FIXME Remove validation from scope of lock.
	 */
	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc < 4) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	as.argc = argc;
	as.argv = argv;

	/*
	 * Set default pool features.
	 */
	pool_features_init(&pf);

	dm_consume_args(&as, 4);
	r = parse_pool_features(&as, &pf, ti);
	if (r)
		goto out_unlock;

	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}
	warn_if_metadata_device_too_big(metadata_dev->bdev);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
		ti->error = "Error getting data device";
		goto out_metadata;
	}

	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		ti->error = "Invalid block size";
		r = -EINVAL;
		goto out;
	}

	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
		ti->error = "Invalid low water mark";
		r = -EINVAL;
		goto out;
	}

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
		goto out;
	}

	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
	if (IS_ERR(pool)) {
		r = PTR_ERR(pool);
		goto out_free_pt;
	}

	/*
	 * 'pool_created' reflects whether this is the first table load.
	 * Top level discard support is not allowed to be changed after
	 * initial load.  This would require a pool reload to trigger thin
	 * device changes.
	 */
	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
		ti->error = "Discard support cannot be disabled once enabled";
		r = -EINVAL;
		goto out_flags_changed;
	}

	pt->pool = pool;
	pt->ti = ti;
	pt->metadata_dev = metadata_dev;
	pt->data_dev = data_dev;
	pt->low_water_blocks = low_water_blocks;
	pt->adjusted_pf = pt->requested_pf = pf;
	ti->num_flush_bios = 1;

	/*
	 * Only need to enable discards if the pool should pass
	 * them down to the data device.  The thin device's discard
	 * processing will cause mappings to be removed from the btree.
	 */
	ti->discard_zeroes_data_unsupported = true;
	if (pf.discard_enabled && pf.discard_passdown) {
		ti->num_discard_bios = 1;

		/*
		 * Setting 'discards_supported' circumvents the normal
		 * stacking of discard limits (this keeps the pool and
		 * thin devices' discard limits consistent).
		 */
		ti->discards_supported = true;
	}
	ti->private = pt;

	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
						calc_metadata_threshold(pt),
						metadata_low_callback,
						pool);
	if (r)
		goto out_free_pt;

	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

out_flags_changed:
	__pool_dec(pool);
out_free_pt:
	kfree(pt);
out:
	dm_put_device(ti, data_dev);
out_metadata:
	dm_put_device(ti, metadata_dev);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int pool_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * As this is a singleton target, ti->begin is always zero.
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio->bi_bdev = pt->data_dev->bdev;
	r = DM_MAPIO_REMAPPED;
	spin_unlock_irqrestore(&pool->lock, flags);

	return r;
}

static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t data_size = ti->len;
	dm_block_t sb_data_size;

	*need_commit = false;

	(void) sector_div(data_size, pool->sectors_per_block);

	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
	if (r) {
		DMERR("%s: failed to retrieve data device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (data_size < sb_data_size) {
		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      (unsigned long long)data_size, sb_data_size);
		return -EINVAL;

	} else if (data_size > sb_data_size) {
		if (dm_pool_metadata_needs_check(pool->pmd)) {
			DMERR("%s: unable to grow the data device until repaired.",
			      dm_device_name(pool->pool_md));
			return 0;
		}

		if (sb_data_size)
			DMINFO("%s: growing the data device from %llu to %llu blocks",
			       dm_device_name(pool->pool_md),
			       sb_data_size, (unsigned long long)data_size);
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	dm_block_t metadata_dev_size, sb_metadata_dev_size;

	*need_commit = false;

	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);

	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
	if (r) {
		DMERR("%s: failed to retrieve metadata device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (metadata_dev_size < sb_metadata_dev_size) {
		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      metadata_dev_size, sb_metadata_dev_size);
		return -EINVAL;

	} else if (metadata_dev_size > sb_metadata_dev_size) {
		if (dm_pool_metadata_needs_check(pool->pmd)) {
			DMERR("%s: unable to grow the metadata device until repaired.",
			      dm_device_name(pool->pool_md));
			return 0;
		}

		warn_if_metadata_device_too_big(pool->md_dev);
		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
		       dm_device_name(pool->pool_md),
		       sb_metadata_dev_size, metadata_dev_size);
		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.
 *
 * This both copes with opening preallocated data devices in the ctr
 * being followed by a resume
 * -and-
 * calling the resume method individually after userspace has
 * grown the data device in reaction to a table event.
 */
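/*
 * Illustrative userspace sequence (not from the original source): after
 * growing the data device, "dmsetup suspend pool" followed by
 * "dmsetup resume pool" re-reads the device sizes below and commits the
 * newly visible space.
 */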
static int pool_preresume(struct dm_target *ti)
{
	int r;
	bool need_commit1, need_commit2;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/*
	 * Take control of the pool object.
	 */
	r = bind_control_target(pool, ti);
	if (r)
		return r;

	r = maybe_resize_data_dev(ti, &need_commit1);
	if (r)
		return r;

	r = maybe_resize_metadata_dev(ti, &need_commit2);
	if (r)
		return r;

	if (need_commit1 || need_commit2)
		(void) commit(pool);

	return 0;
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = false;
	spin_unlock_irqrestore(&pool->lock, flags);
	requeue_bios(pool);

	do_waker(&pool->waker.work);
}

static void pool_postsuspend(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work(&pool->waker);
	cancel_delayed_work(&pool->no_space_timeout);
	flush_workqueue(pool->wq);
	(void) commit(pool);
}

static int check_arg_count(unsigned argc, unsigned args_required)
{
	if (argc != args_required) {
		DMWARN("Message received with %u arguments instead of %u.",
		       argc, args_required);
		return -EINVAL;
	}

	return 0;
}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{
	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
	    *dev_id <= MAX_DEV_ID)
		return 0;

	if (warning)
		DMWARN("Message received with invalid device id: %s", arg);

	return -EINVAL;
}

static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

	return 0;
}

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	(void) commit(pool);

	r = dm_pool_reserve_metadata_snap(pool->pmd);
	if (r)
		DMWARN("reserve_metadata_snap message failed.");

	return r;
}

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	r = dm_pool_release_metadata_snap(pool->pmd);
	if (r)
		DMWARN("release_metadata_snap message failed.");

	return r;
}

/*
 * Messages supported:
 *   create_thin <dev_id>
 *   create_snap <dev_id> <origin_id>
 *   delete <dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
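/*
 * Example messages (illustrative device name): create thin device 0,
 * then snapshot it as device 1:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 */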
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
		r = process_reserve_metadata_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "release_metadata_snap"))
		r = process_release_metadata_snap_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r)
		(void) commit(pool);

	return r;
}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
		pf->error_if_no_space;
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");

	if (pf->error_if_no_space)
		DMEMIT("error_if_no_space ");
}

/*
 * Status line is:
 *    <transaction id> <used metadata blocks>/<total metadata blocks>
 *    <used data blocks>/<total data blocks> <held metadata root>
 */
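/*
 * Illustrative STATUSTYPE_INFO output (values invented for the example):
 * a pool on transaction 2 with no held metadata root, running read-write
 * with discard passdown and queueing when out of space:
 *
 *   2 368/4096 89/262144 - rw discard_passdown queue_if_no_space
 */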
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_free_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_data_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_snap returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
			DMEMIT("out_of_data_space ");
		else if (pool->pf.mode == PM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard ");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown ");
		else
			DMEMIT("no_discard_passdown ");

		if (pool->pf.error_if_no_space)
			DMEMIT("error_if_no_space ");
		else
			DMEMIT("queue_if_no_space ");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct pool_c *pt = ti->private;
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = pt->data_dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
	struct pool *pool = pt->pool;
	struct queue_limits *data_limits;

	limits->max_discard_sectors = pool->sectors_per_block;

	/*
	 * discard_granularity is just a hint, and not enforced.
	 */
	if (pt->adjusted_pf.discard_passdown) {
		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
		limits->discard_granularity = max(data_limits->discard_granularity,
						  pool->sectors_per_block << SECTOR_SHIFT);
	} else
		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * Adjust max_sectors_kb to highest possible power-of-2
	 * factor of pool->sectors_per_block.
	 */
	if (limits->max_hw_sectors & (limits->max_hw_sectors - 1))
		limits->max_sectors = rounddown_pow_of_two(limits->max_hw_sectors);
	else
		limits->max_sectors = limits->max_hw_sectors;

	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	} else if (block_size_is_power_of_two(pool)) {
		/* max_sectors_kb is >= power-of-2 thinp blocksize */
		while (!is_factor(limits->max_sectors, pool->sectors_per_block)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}
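	/*
	 * Illustrative example (not from the original source): with a
	 * 384-sector (192KiB) thinp block and max_hw_sectors = 256, the
	 * first loop above settles on max_sectors = 128, the largest
	 * power of two that divides 384.
	 */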

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
		else
			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if pool's data device has support.
		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
		 * user to see that, so make sure to set all discard limits to 0.
		 */
		limits->discard_granularity = 0;
		return;
	}

	disable_passdown_if_not_supported(pt);

	set_discard_limits(pt, limits);
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.merge = pool_merge,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (atomic_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
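/*
 * Example (illustrative; thin device 0 must first be created with a
 * "create_thin" pool message): activate device 0 as a 1GiB
 * (2097152-sector) volume:
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */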
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
	unsigned long flags;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;
		goto bad_thin_open;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_thin_open;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad_target_max_io_len;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	ti->discard_zeroes_data_unsupported = true;
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		/* Discard bios must be split on a block boundary */
		ti->split_discard_bios = true;
	}

	dm_put(pool_md);

	mutex_unlock(&dm_thin_pool_table.mutex);

	atomic_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
	 * added tc isn't yet visible).  So this reduces latency since we
	 * aren't then dependent on the periodic commit to wake_worker().
	 */
	synchronize_rcu();

	return 0;

bad_target_max_io_len:
	dm_pool_close_thin_device(tc->td);
bad_thin_open:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	/*
	 * The dm_noflush_suspending flag has been cleared by now, so
	 * unfortunately we must always run this.
	 */
	noflush_work(tc, do_noflush_stop);
}

static int thin_preresume(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);

	return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct thin_c *tc = ti->private;
	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = tc->pool_dev->bdev;
	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.preresume = thin_preresume,
	.presuspend = thin_presuspend,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.merge = thin_merge,
	.iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_pool_target;

	r = -ENOMEM;

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		goto bad_new_mapping_cache;

	return 0;

bad_new_mapping_cache:
	dm_unregister_target(&pool_target);
bad_pool_target:
	dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
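
/*
 * Illustrative runtime tuning (the sysfs path assumes the usual module
 * parameter layout for the dm-thin-pool module):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */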

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");