/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
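/*
 * A bio prison cell can lock a range of blocks in either the thin
 * device's virtual address space or the pool's physical (data device)
 * address space.
 */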
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

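/*
 * Callers of throttle_lock() take the rw_semaphore for reading.  Once the
 * worker has been running for longer than THROTTLE_THRESHOLD jiffies,
 * throttle_work_update() takes it for writing, holding off new lockers
 * until throttle_work_complete() releases it at the end of the pass.
 */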
struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered here from least to most degraded so
 * that modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell **cell_sort_array;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/**
 * __blkdev_issue_discard_async - queue a discard with async completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 * @parent_bio: parent discard bio that all sub discards get chained to
 *
 * Description:
 *    Asynchronously issue a discard request for the sectors in question.
 *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
 *    that is being kept local to DM thinp until the block layer changes
 *    that allow late bio splitting land upstream.
 */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
					struct bio *parent_bio)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Required bio_put occurs in bio_endio thanks to bio_chain below
		 */
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio_chain(bio, parent_bio);

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs).  Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
			 struct bio *parent_bio)
{
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
					    GFP_NOWAIT, 0, parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

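/*
 * Thin wrappers around the bio prison's cell operations.  Each takes care
 * of allocating cells from, or returning them to, the prison's mempool.
 */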
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

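/*
 * Per-bio context, stored in the bio's per-bio data (see the
 * dm_per_bio_data() users below).
 */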
struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios)))
		bio_endio(bio, error);
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

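/*
 * Return all of this thin device's deferred and retry-on-resume bios
 * with DM_ENDIO_REQUEUE, and requeue any deferred cells.
 */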
static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

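/*
 * Map a bio's starting sector to the pool block that contains it.
 */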
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}

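/*
 * Redirect a bio to the given block on the pool device; the pool target
 * in turn remaps this onto its data device.
 */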
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
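/*
 * Tracks a block that is being provisioned (zeroed or copied) or
 * discarded.  When its outstanding prepare_actions reach zero it is
 * queued on a prepared list for the worker to finish (see
 * __complete_mapping_preparation() and process_prepared()).
 */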
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio, 0);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio, 0);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				return r;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				return r;

			if (used)
				break;
		}

		r = issue_discard(tc, b, e, m->bio);
		if (r)
			return r;

		b = e;
	}

	return 0;
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r)
		metadata_operation_failed(pool, "dm_thin_remove_range", r);

	else if (m->maybe_shared)
		r = passdown_double_checking_shared_status(m);
	else
		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	bio_endio(m->bio, r);
	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, pool->mapping_pool);
}

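/*
 * Drain a list of prepared mappings under the pool lock and run the
 * given handler on each entry.
 */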
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

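/*
 * pool->next_mapping is preallocated here (GFP_ATOMIC, so this can fail
 * with -ENOMEM) so that a later get_next_mapping() cannot fail.
 */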
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

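/*
 * If the pool was out of data space, see whether freed blocks have made
 * space available again and, if so, switch back to write mode.
 */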
1352static void check_for_space(struct pool *pool)
1353{
1354 int r;
1355 dm_block_t nr_free;
1356
1357 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
1358 return;
1359
1360 r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1361 if (r)
1362 return;
1363
1364 if (nr_free)
1365 set_pool_mode(pool, PM_WRITE);
1366}
1367
Joe Thornbere49e5822012-07-27 15:08:16 +01001368/*
1369 * A non-zero return indicates read_only or fail_io mode.
1370 * Many callers don't care about the return value.
1371 */
Joe Thornber020cc3b2013-12-04 15:05:36 -05001372static int commit(struct pool *pool)
Joe Thornbere49e5822012-07-27 15:08:16 +01001373{
1374 int r;
1375
Joe Thornber8d07e8a2014-05-06 16:28:14 +01001376 if (get_pool_mode(pool) >= PM_READ_ONLY)
Joe Thornbere49e5822012-07-27 15:08:16 +01001377 return -EINVAL;
1378
Joe Thornber020cc3b2013-12-04 15:05:36 -05001379 r = dm_pool_commit_metadata(pool->pmd);
Joe Thornberb5330652013-12-04 19:51:33 -05001380 if (r)
1381 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
Joe Thornber2c43fd22014-12-11 11:12:19 +00001382 else
1383 check_for_space(pool);
Joe Thornbere49e5822012-07-27 15:08:16 +01001384
1385 return r;
1386}
1387
Joe Thornber88a66212013-12-04 20:16:12 -05001388static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1389{
1390 unsigned long flags;
1391
1392 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1393 DMWARN("%s: reached low water mark for data device: sending event.",
1394 dm_device_name(pool->pool_md));
1395 spin_lock_irqsave(&pool->lock, flags);
1396 pool->low_water_triggered = true;
1397 spin_unlock_irqrestore(&pool->lock, flags);
1398 dm_table_event(pool->ti->table);
1399 }
1400}
1401
Joe Thornber991d9fa2011-10-31 20:21:18 +00001402static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1403{
1404 int r;
1405 dm_block_t free_blocks;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001406 struct pool *pool = tc->pool;
1407
Joe Thornber3e1a0692014-03-03 16:03:26 +00001408 if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
Joe Thornber8d30abf2013-12-04 19:16:11 -05001409 return -EINVAL;
1410
Joe Thornber991d9fa2011-10-31 20:21:18 +00001411 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
Joe Thornberb5330652013-12-04 19:51:33 -05001412 if (r) {
1413 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001414 return r;
Joe Thornberb5330652013-12-04 19:51:33 -05001415 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001416
Joe Thornber88a66212013-12-04 20:16:12 -05001417 check_low_water_mark(pool, free_blocks);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001418
1419 if (!free_blocks) {
Mike Snitzer94563ba2013-08-22 09:56:18 -04001420 /*
1421 * Try to commit to see if that will free up some
1422 * more space.
1423 */
Joe Thornber020cc3b2013-12-04 15:05:36 -05001424 r = commit(pool);
1425 if (r)
1426 return r;
Mike Snitzer94563ba2013-08-22 09:56:18 -04001427
1428 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
Joe Thornberb5330652013-12-04 19:51:33 -05001429 if (r) {
1430 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
Mike Snitzer94563ba2013-08-22 09:56:18 -04001431 return r;
Joe Thornberb5330652013-12-04 19:51:33 -05001432 }
Mike Snitzer94563ba2013-08-22 09:56:18 -04001433
Mike Snitzer94563ba2013-08-22 09:56:18 -04001434 if (!free_blocks) {
Joe Thornber3e1a0692014-03-03 16:03:26 +00001435 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001436 return -ENOSPC;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001437 }
1438 }
1439
1440 r = dm_pool_alloc_data_block(pool->pmd, result);
Mike Snitzer4a02b342013-12-03 12:20:57 -05001441 if (r) {
Joe Thornberb5330652013-12-04 19:51:33 -05001442 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001443 return r;
Mike Snitzer4a02b342013-12-03 12:20:57 -05001444 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001445
1446 return 0;
1447}
1448
1449/*
1450 * If we have run out of space, queue bios until the device is
1451 * resumed, presumably after having been reloaded with more space.
1452 */
1453static void retry_on_resume(struct bio *bio)
1454{
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00001455 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornbereb2aa482012-03-28 18:41:28 +01001456 struct thin_c *tc = h->tc;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001457 unsigned long flags;
1458
Mike Snitzerc140e1c2014-03-20 21:17:14 -04001459 spin_lock_irqsave(&tc->lock, flags);
1460 bio_list_add(&tc->retry_on_resume_list, bio);
1461 spin_unlock_irqrestore(&tc->lock, flags);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001462}
1463
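/*
 * Decide what to do with a bio we cannot service right now: a zero
 * return means queue it for retry on resume, a negative errno means
 * fail it immediately (depends on pool mode and error_if_no_space).
 */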
Mike Snitzeraf918052014-05-22 14:32:51 -04001464static int should_error_unserviceable_bio(struct pool *pool)
Joe Thornber3e1a0692014-03-03 16:03:26 +00001465{
1466 enum pool_mode m = get_pool_mode(pool);
1467
1468 switch (m) {
1469 case PM_WRITE:
1470 /* Shouldn't get here */
1471 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
Mike Snitzeraf918052014-05-22 14:32:51 -04001472 return -EIO;
Joe Thornber3e1a0692014-03-03 16:03:26 +00001473
1474 case PM_OUT_OF_DATA_SPACE:
Mike Snitzeraf918052014-05-22 14:32:51 -04001475 return pool->pf.error_if_no_space ? -ENOSPC : 0;
Joe Thornber3e1a0692014-03-03 16:03:26 +00001476
1477 case PM_READ_ONLY:
1478 case PM_FAIL:
Mike Snitzeraf918052014-05-22 14:32:51 -04001479 return -EIO;
Joe Thornber3e1a0692014-03-03 16:03:26 +00001480 default:
1481 /* Shouldn't get here */
1482 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
Mike Snitzeraf918052014-05-22 14:32:51 -04001483 return -EIO;
Joe Thornber3e1a0692014-03-03 16:03:26 +00001484 }
1485}
1486
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001487static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1488{
Mike Snitzeraf918052014-05-22 14:32:51 -04001489 int error = should_error_unserviceable_bio(pool);
1490
1491 if (error)
1492 bio_endio(bio, error);
Mike Snitzer6d162022013-12-20 18:09:02 -05001493 else
1494 retry_on_resume(bio);
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001495}
1496
Mike Snitzer399cadd2013-12-05 16:03:33 -05001497static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001498{
1499 struct bio *bio;
1500 struct bio_list bios;
Mike Snitzeraf918052014-05-22 14:32:51 -04001501 int error;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001502
Mike Snitzeraf918052014-05-22 14:32:51 -04001503 error = should_error_unserviceable_bio(pool);
1504 if (error) {
1505 cell_error_with_code(pool, cell, error);
Joe Thornber3e1a0692014-03-03 16:03:26 +00001506 return;
1507 }
1508
Joe Thornber991d9fa2011-10-31 20:21:18 +00001509 bio_list_init(&bios);
Joe Thornber6beca5e2013-03-01 22:45:50 +00001510 cell_release(pool, cell, &bios);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001511
Mike Snitzer9d094ee2014-10-19 08:23:09 -04001512 while ((bio = bio_list_pop(&bios)))
1513 retry_on_resume(bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001514}
1515
Joe Thornber34fbcf62015-04-16 12:58:35 +01001516static void process_discard_cell_no_passdown(struct thin_c *tc,
1517 struct dm_bio_prison_cell *virt_cell)
Joe Thornber104655f2012-03-28 18:41:28 +01001518{
Joe Thornber104655f2012-03-28 18:41:28 +01001519 struct pool *pool = tc->pool;
Joe Thornber34fbcf62015-04-16 12:58:35 +01001520 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1521
1522 /*
1523 * We don't need to lock the data blocks, since there's no
1524 * passdown. We only lock data blocks for allocation and breaking sharing.
1525 */
1526 m->tc = tc;
1527 m->virt_begin = virt_cell->key.block_begin;
1528 m->virt_end = virt_cell->key.block_end;
1529 m->cell = virt_cell;
1530 m->bio = virt_cell->holder;
1531
1532 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1533 pool->process_prepared_discard(m);
1534}
1535
1536/*
1537 * FIXME: DM local hack to defer parent bios's end_io until we
1538 * _know_ all chained sub range discard bios have completed.
1539 * Will go away once late bio splitting lands upstream!
1540 */
1541static inline void __bio_inc_remaining(struct bio *bio)
1542{
1543 bio->bi_flags |= (1 << BIO_CHAIN);
1544 smp_mb__before_atomic();
1545 atomic_inc(&bio->__bi_remaining);
1546}
1547
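/*
 * Walk the mapped ranges within [begin, end), locking the data blocks
 * behind each run and queueing a discard mapping for it.  Ranges whose
 * data blocks are under contention are skipped.
 */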
1548static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1549 struct bio *bio)
1550{
1551 struct pool *pool = tc->pool;
1552
1553 int r;
1554 bool maybe_shared;
1555 struct dm_cell_key data_key;
1556 struct dm_bio_prison_cell *data_cell;
Mike Snitzera24c2562012-06-03 00:30:00 +01001557 struct dm_thin_new_mapping *m;
Joe Thornber34fbcf62015-04-16 12:58:35 +01001558 dm_block_t virt_begin, virt_end, data_begin;
Joe Thornber104655f2012-03-28 18:41:28 +01001559
Joe Thornber34fbcf62015-04-16 12:58:35 +01001560 while (begin != end) {
1561 r = ensure_next_mapping(pool);
1562 if (r)
1563 /* we did our best */
1564 return;
Joe Thornber104655f2012-03-28 18:41:28 +01001565
Joe Thornber34fbcf62015-04-16 12:58:35 +01001566 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1567 &data_begin, &maybe_shared);
1568 if (r)
1569 /*
1570 * Silently fail, letting any mappings we've
1571 * created complete.
1572 */
Joe Thornber104655f2012-03-28 18:41:28 +01001573 break;
Joe Thornber34fbcf62015-04-16 12:58:35 +01001574
1575 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
1576 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1577 /* contention, we'll give up with this range */
1578 begin = virt_end;
1579 continue;
Joe Thornber104655f2012-03-28 18:41:28 +01001580 }
1581
Joe Thornber104655f2012-03-28 18:41:28 +01001582 /*
Joe Thornber34fbcf62015-04-16 12:58:35 +01001583 * IO may still be going to the destination block. We must
1584 * quiesce before we can do the removal.
Joe Thornber104655f2012-03-28 18:41:28 +01001585 */
Joe Thornber34fbcf62015-04-16 12:58:35 +01001586 m = get_next_mapping(pool);
1587 m->tc = tc;
1588 m->maybe_shared = maybe_shared;
1589 m->virt_begin = virt_begin;
1590 m->virt_end = virt_end;
1591 m->data_block = data_begin;
1592 m->cell = data_cell;
1593 m->bio = bio;
Joe Thornber104655f2012-03-28 18:41:28 +01001594
Joe Thornber34fbcf62015-04-16 12:58:35 +01001595 /*
1596 * The parent bio must not complete before sub discard bios are
1597 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
1598 *
1599 * This per-mapping bi_remaining increment is paired with
1600 * the implicit decrement that occurs via bio_endio() in
1601 * process_prepared_discard_{passdown,no_passdown}.
1602 */
1603 __bio_inc_remaining(bio);
1604 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1605 pool->process_prepared_discard(m);
1606
1607 begin = virt_end;
Joe Thornber104655f2012-03-28 18:41:28 +01001608 }
1609}
1610
Joe Thornber34fbcf62015-04-16 12:58:35 +01001611static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1612{
1613 struct bio *bio = virt_cell->holder;
1614 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1615
1616 /*
1617 * The virt_cell will only get freed once the origin bio completes.
1618 * This means it will remain locked while all the individual
1619 * passdown bios are in flight.
1620 */
1621 h->cell = virt_cell;
1622 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1623
1624 /*
1625 * We complete the bio now, knowing that the bi_remaining field
1626 * will prevent completion until the sub range discards have
1627 * completed.
1628 */
1629 bio_endio(bio, 0);
1630}
1631
Joe Thornbera374bb22014-10-10 13:43:14 +01001632static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1633{
Joe Thornber34fbcf62015-04-16 12:58:35 +01001634 dm_block_t begin, end;
1635 struct dm_cell_key virt_key;
1636 struct dm_bio_prison_cell *virt_cell;
Joe Thornbera374bb22014-10-10 13:43:14 +01001637
Joe Thornber34fbcf62015-04-16 12:58:35 +01001638 get_bio_block_range(tc, bio, &begin, &end);
1639 if (begin == end) {
1640 /*
1641 * The discard covers less than a block.
1642 */
1643 bio_endio(bio, 0);
1644 return;
1645 }
1646
1647 build_key(tc->td, VIRTUAL, begin, end, &virt_key);
1648 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1649 /*
1650 * Potential starvation issue: We're relying on the
1651 * fs/application being well behaved, and not trying to
1652 * send IO to a region at the same time as discarding it.
1653 * If they do this persistently then it's possible this
1654 * cell will never be granted.
1655 */
Joe Thornbera374bb22014-10-10 13:43:14 +01001656 return;
1657
Joe Thornber34fbcf62015-04-16 12:58:35 +01001658 tc->pool->process_discard_cell(tc, virt_cell);
Joe Thornbera374bb22014-10-10 13:43:14 +01001659}
1660
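/*
 * Break sharing of a data block by copying it to a newly allocated
 * block.  On -ENOSPC the held bios are queued for retry once the pool
 * has been resized.
 */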
Joe Thornber991d9fa2011-10-31 20:21:18 +00001661static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
Mike Snitzer44feb382012-10-12 21:02:10 +01001662 struct dm_cell_key *key,
Joe Thornber991d9fa2011-10-31 20:21:18 +00001663 struct dm_thin_lookup_result *lookup_result,
Mike Snitzera24c2562012-06-03 00:30:00 +01001664 struct dm_bio_prison_cell *cell)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001665{
1666 int r;
1667 dm_block_t data_block;
Mike Snitzerd6fc2042013-08-21 17:40:11 -04001668 struct pool *pool = tc->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001669
1670 r = alloc_data_block(tc, &data_block);
1671 switch (r) {
1672 case 0:
Joe Thornber2dd9c252012-03-28 18:41:28 +01001673 schedule_internal_copy(tc, block, lookup_result->block,
1674 data_block, cell, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001675 break;
1676
1677 case -ENOSPC:
Mike Snitzer399cadd2013-12-05 16:03:33 -05001678 retry_bios_on_resume(pool, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001679 break;
1680
1681 default:
Mike Snitzerc3977412012-12-21 20:23:34 +00001682 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1683 __func__, r);
Mike Snitzerd6fc2042013-08-21 17:40:11 -04001684 cell_error(pool, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001685 break;
1686 }
1687}
1688
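/*
 * Visitor for cell_visit_release(): writes and flush/discard bios are
 * collected for deferral, while reads of the shared block are counted
 * against the shared_read deferred set and collected for immediate
 * remap and issue by the caller.
 */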
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001689static void __remap_and_issue_shared_cell(void *context,
1690 struct dm_bio_prison_cell *cell)
1691{
1692 struct remap_info *info = context;
1693 struct bio *bio;
1694
1695 while ((bio = bio_list_pop(&cell->bios))) {
1696 if ((bio_data_dir(bio) == WRITE) ||
1697 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
1698 bio_list_add(&info->defer_bios, bio);
1699 else {
	1700			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1701
1702 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1703 inc_all_io_entry(info->tc->pool, bio);
1704 bio_list_add(&info->issue_bios, bio);
1705 }
1706 }
1707}
1708
1709static void remap_and_issue_shared_cell(struct thin_c *tc,
1710 struct dm_bio_prison_cell *cell,
1711 dm_block_t block)
1712{
1713 struct bio *bio;
1714 struct remap_info info;
1715
1716 info.tc = tc;
1717 bio_list_init(&info.defer_bios);
1718 bio_list_init(&info.issue_bios);
1719
1720 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1721 &info, cell);
1722
1723 while ((bio = bio_list_pop(&info.defer_bios)))
1724 thin_defer_bio(tc, bio);
1725
1726 while ((bio = bio_list_pop(&info.issue_bios)))
1727 remap_and_issue(tc, bio, block);
1728}
1729
Joe Thornber991d9fa2011-10-31 20:21:18 +00001730static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1731 dm_block_t block,
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001732 struct dm_thin_lookup_result *lookup_result,
1733 struct dm_bio_prison_cell *virt_cell)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001734{
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001735 struct dm_bio_prison_cell *data_cell;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001736 struct pool *pool = tc->pool;
Mike Snitzer44feb382012-10-12 21:02:10 +01001737 struct dm_cell_key key;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001738
1739 /*
1740 * If cell is already occupied, then sharing is already in the process
1741 * of being broken so we have nothing further to do here.
1742 */
1743 build_data_key(tc->td, lookup_result->block, &key);
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001744 if (bio_detain(pool, &key, bio, &data_cell)) {
1745 cell_defer_no_holder(tc, virt_cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001746 return;
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001747 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001748
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001749 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1750 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1751 cell_defer_no_holder(tc, virt_cell);
1752 } else {
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00001753 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornber991d9fa2011-10-31 20:21:18 +00001754
Mike Snitzer44feb382012-10-12 21:02:10 +01001755 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
Joe Thornbere8088072012-12-21 20:23:31 +00001756 inc_all_io_entry(pool, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001757 remap_and_issue(tc, bio, lookup_result->block);
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001758
1759 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1760 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001761 }
1762}
1763
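/*
 * Provision a new data block for an unmapped virtual block.  Empty
 * (flush) bios are remapped immediately, reads are zero filled and
 * completed, and writes trigger either a copy from the external origin
 * (if one exists) or a zero of the new block.
 */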
1764static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
Mike Snitzera24c2562012-06-03 00:30:00 +01001765 struct dm_bio_prison_cell *cell)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001766{
1767 int r;
1768 dm_block_t data_block;
Joe Thornber6beca5e2013-03-01 22:45:50 +00001769 struct pool *pool = tc->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001770
1771 /*
1772 * Remap empty bios (flushes) immediately, without provisioning.
1773 */
Kent Overstreet4f024f32013-10-11 15:44:27 -07001774 if (!bio->bi_iter.bi_size) {
Joe Thornber6beca5e2013-03-01 22:45:50 +00001775 inc_all_io_entry(pool, bio);
Joe Thornberf286ba02012-12-21 20:23:33 +00001776 cell_defer_no_holder(tc, cell);
Joe Thornbere8088072012-12-21 20:23:31 +00001777
Joe Thornber991d9fa2011-10-31 20:21:18 +00001778 remap_and_issue(tc, bio, 0);
1779 return;
1780 }
1781
1782 /*
1783 * Fill read bios with zeroes and complete them immediately.
1784 */
1785 if (bio_data_dir(bio) == READ) {
1786 zero_fill_bio(bio);
Joe Thornberf286ba02012-12-21 20:23:33 +00001787 cell_defer_no_holder(tc, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001788 bio_endio(bio, 0);
1789 return;
1790 }
1791
1792 r = alloc_data_block(tc, &data_block);
1793 switch (r) {
1794 case 0:
Joe Thornber2dd9c252012-03-28 18:41:28 +01001795 if (tc->origin_dev)
1796 schedule_external_copy(tc, block, data_block, cell, bio);
1797 else
1798 schedule_zero(tc, block, data_block, cell, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001799 break;
1800
1801 case -ENOSPC:
Mike Snitzer399cadd2013-12-05 16:03:33 -05001802 retry_bios_on_resume(pool, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001803 break;
1804
1805 default:
Mike Snitzerc3977412012-12-21 20:23:34 +00001806 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1807 __func__, r);
Joe Thornber6beca5e2013-03-01 22:45:50 +00001808 cell_error(pool, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001809 break;
1810 }
1811}
1812
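/*
 * Process a deferred bio, held in @cell: remap it if the block is
 * already mapped, break sharing for writes to shared blocks, and
 * provision (or read from the origin / zero fill) on -ENODATA.
 */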
Joe Thornbera374bb22014-10-10 13:43:14 +01001813static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001814{
1815 int r;
Joe Thornber6beca5e2013-03-01 22:45:50 +00001816 struct pool *pool = tc->pool;
Joe Thornbera374bb22014-10-10 13:43:14 +01001817 struct bio *bio = cell->holder;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001818 dm_block_t block = get_bio_block(tc, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001819 struct dm_thin_lookup_result lookup_result;
1820
Joe Thornbera374bb22014-10-10 13:43:14 +01001821 if (tc->requeue_mode) {
1822 cell_requeue(pool, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001823 return;
Joe Thornbera374bb22014-10-10 13:43:14 +01001824 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001825
1826 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1827 switch (r) {
1828 case 0:
Joe Thornber23ca2bb2014-10-15 14:46:58 +01001829 if (lookup_result.shared)
1830 process_shared_bio(tc, bio, block, &lookup_result, cell);
1831 else {
Joe Thornber6beca5e2013-03-01 22:45:50 +00001832 inc_all_io_entry(pool, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001833 remap_and_issue(tc, bio, lookup_result.block);
Joe Thornbera374bb22014-10-10 13:43:14 +01001834 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
Joe Thornbere8088072012-12-21 20:23:31 +00001835 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001836 break;
1837
1838 case -ENODATA:
Joe Thornber2dd9c252012-03-28 18:41:28 +01001839 if (bio_data_dir(bio) == READ && tc->origin_dev) {
Joe Thornber6beca5e2013-03-01 22:45:50 +00001840 inc_all_io_entry(pool, bio);
Joe Thornberf286ba02012-12-21 20:23:33 +00001841 cell_defer_no_holder(tc, cell);
Joe Thornbere8088072012-12-21 20:23:31 +00001842
Joe Thornbere5aea7b2014-06-13 14:47:24 +01001843 if (bio_end_sector(bio) <= tc->origin_size)
1844 remap_to_origin_and_issue(tc, bio);
1845
1846 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1847 zero_fill_bio(bio);
1848 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1849 remap_to_origin_and_issue(tc, bio);
1850
1851 } else {
1852 zero_fill_bio(bio);
1853 bio_endio(bio, 0);
1854 }
Joe Thornber2dd9c252012-03-28 18:41:28 +01001855 } else
1856 provision_block(tc, bio, block, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001857 break;
1858
1859 default:
Mike Snitzerc3977412012-12-21 20:23:34 +00001860 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1861 __func__, r);
Joe Thornberf286ba02012-12-21 20:23:33 +00001862 cell_defer_no_holder(tc, cell);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001863 bio_io_error(bio);
1864 break;
1865 }
1866}
1867
Joe Thornbera374bb22014-10-10 13:43:14 +01001868static void process_bio(struct thin_c *tc, struct bio *bio)
1869{
1870 struct pool *pool = tc->pool;
1871 dm_block_t block = get_bio_block(tc, bio);
1872 struct dm_bio_prison_cell *cell;
1873 struct dm_cell_key key;
1874
1875 /*
1876 * If cell is already occupied, then the block is already
1877 * being provisioned so we have nothing further to do here.
1878 */
1879 build_virtual_key(tc->td, block, &key);
1880 if (bio_detain(pool, &key, bio, &cell))
1881 return;
1882
1883 process_cell(tc, cell);
1884}
1885
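/*
 * Read-only variant: never allocates.  Writes to shared or unprovisioned
 * blocks are treated as unserviceable; reads are remapped to the existing
 * mapping, redirected to the origin, or zero filled.
 */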
1886static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1887 struct dm_bio_prison_cell *cell)
Joe Thornbere49e5822012-07-27 15:08:16 +01001888{
1889 int r;
1890 int rw = bio_data_dir(bio);
1891 dm_block_t block = get_bio_block(tc, bio);
1892 struct dm_thin_lookup_result lookup_result;
1893
1894 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1895 switch (r) {
1896 case 0:
Joe Thornbera374bb22014-10-10 13:43:14 +01001897 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001898 handle_unserviceable_bio(tc->pool, bio);
Joe Thornbera374bb22014-10-10 13:43:14 +01001899 if (cell)
1900 cell_defer_no_holder(tc, cell);
1901 } else {
Joe Thornbere8088072012-12-21 20:23:31 +00001902 inc_all_io_entry(tc->pool, bio);
Joe Thornbere49e5822012-07-27 15:08:16 +01001903 remap_and_issue(tc, bio, lookup_result.block);
Joe Thornbera374bb22014-10-10 13:43:14 +01001904 if (cell)
1905 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
Joe Thornbere8088072012-12-21 20:23:31 +00001906 }
Joe Thornbere49e5822012-07-27 15:08:16 +01001907 break;
1908
1909 case -ENODATA:
Joe Thornbera374bb22014-10-10 13:43:14 +01001910 if (cell)
1911 cell_defer_no_holder(tc, cell);
Joe Thornbere49e5822012-07-27 15:08:16 +01001912 if (rw != READ) {
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001913 handle_unserviceable_bio(tc->pool, bio);
Joe Thornbere49e5822012-07-27 15:08:16 +01001914 break;
1915 }
1916
1917 if (tc->origin_dev) {
Joe Thornbere8088072012-12-21 20:23:31 +00001918 inc_all_io_entry(tc->pool, bio);
Joe Thornbere49e5822012-07-27 15:08:16 +01001919 remap_to_origin_and_issue(tc, bio);
1920 break;
1921 }
1922
1923 zero_fill_bio(bio);
1924 bio_endio(bio, 0);
1925 break;
1926
1927 default:
Mike Snitzerc3977412012-12-21 20:23:34 +00001928 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1929 __func__, r);
Joe Thornbera374bb22014-10-10 13:43:14 +01001930 if (cell)
1931 cell_defer_no_holder(tc, cell);
Joe Thornbere49e5822012-07-27 15:08:16 +01001932 bio_io_error(bio);
1933 break;
1934 }
1935}
1936
Joe Thornbera374bb22014-10-10 13:43:14 +01001937static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1938{
1939 __process_bio_read_only(tc, bio, NULL);
1940}
1941
1942static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1943{
1944 __process_bio_read_only(tc, cell->holder, cell);
1945}
1946
Joe Thornber3e1a0692014-03-03 16:03:26 +00001947static void process_bio_success(struct thin_c *tc, struct bio *bio)
1948{
1949 bio_endio(bio, 0);
1950}
1951
Joe Thornbere49e5822012-07-27 15:08:16 +01001952static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1953{
1954 bio_io_error(bio);
1955}
1956
Joe Thornbera374bb22014-10-10 13:43:14 +01001957static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1958{
1959 cell_success(tc->pool, cell);
1960}
1961
1962static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1963{
1964 cell_error(tc->pool, cell);
1965}
1966
Joe Thornberac8c3f32013-05-10 14:37:21 +01001967/*
1968 * FIXME: should we also commit due to size of transaction, measured in
1969 * metadata blocks?
1970 */
Joe Thornber905e51b2012-03-28 18:41:27 +01001971static int need_commit_due_to_time(struct pool *pool)
1972{
Manuel Schölling0f30af92014-05-22 22:42:37 +02001973 return !time_in_range(jiffies, pool->last_commit_jiffies,
1974 pool->last_commit_jiffies + COMMIT_PERIOD);
Joe Thornber905e51b2012-03-28 18:41:27 +01001975}
1976
Mike Snitzer67324ea2014-03-21 18:33:41 -04001977#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1978#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1979
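/*
 * Insert a bio into the per-thin rb-tree, keyed by start sector, so the
 * deferred bios can later be issued in sorted order.
 */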
1980static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1981{
1982 struct rb_node **rbp, *parent;
1983 struct dm_thin_endio_hook *pbd;
1984 sector_t bi_sector = bio->bi_iter.bi_sector;
1985
1986 rbp = &tc->sort_bio_list.rb_node;
1987 parent = NULL;
1988 while (*rbp) {
1989 parent = *rbp;
1990 pbd = thin_pbd(parent);
1991
1992 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1993 rbp = &(*rbp)->rb_left;
1994 else
1995 rbp = &(*rbp)->rb_right;
1996 }
1997
1998 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1999 rb_link_node(&pbd->rb_node, parent, rbp);
2000 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2001}
2002
2003static void __extract_sorted_bios(struct thin_c *tc)
2004{
2005 struct rb_node *node;
2006 struct dm_thin_endio_hook *pbd;
2007 struct bio *bio;
2008
2009 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2010 pbd = thin_pbd(node);
2011 bio = thin_bio(pbd);
2012
2013 bio_list_add(&tc->deferred_bio_list, bio);
2014 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2015 }
2016
2017 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2018}
2019
2020static void __sort_thin_deferred_bios(struct thin_c *tc)
2021{
2022 struct bio *bio;
2023 struct bio_list bios;
2024
2025 bio_list_init(&bios);
2026 bio_list_merge(&bios, &tc->deferred_bio_list);
2027 bio_list_init(&tc->deferred_bio_list);
2028
2029 /* Sort deferred_bio_list using rb-tree */
2030 while ((bio = bio_list_pop(&bios)))
2031 __thin_bio_rb_add(tc, bio);
2032
2033 /*
2034 * Transfer the sorted bios in sort_bio_list back to
2035 * deferred_bio_list to allow lockless submission of
2036 * all bios.
2037 */
2038 __extract_sorted_bios(tc);
2039}
2040
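/*
 * Issue a thin device's deferred bios in sector-sorted order, pausing if
 * we run out of mapping structs and periodically updating the throttle
 * and issuing metadata prefetches.
 */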
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002041static void process_thin_deferred_bios(struct thin_c *tc)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002042{
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002043 struct pool *pool = tc->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002044 unsigned long flags;
2045 struct bio *bio;
2046 struct bio_list bios;
Mike Snitzer67324ea2014-03-21 18:33:41 -04002047 struct blk_plug plug;
Joe Thornber8a01a6a2014-10-06 15:28:30 +01002048 unsigned count = 0;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002049
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002050 if (tc->requeue_mode) {
Mike Snitzer42d6a8c2014-10-19 07:52:44 -04002051 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002052 return;
2053 }
2054
Joe Thornber991d9fa2011-10-31 20:21:18 +00002055 bio_list_init(&bios);
2056
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002057 spin_lock_irqsave(&tc->lock, flags);
Mike Snitzer67324ea2014-03-21 18:33:41 -04002058
2059 if (bio_list_empty(&tc->deferred_bio_list)) {
2060 spin_unlock_irqrestore(&tc->lock, flags);
2061 return;
2062 }
2063
2064 __sort_thin_deferred_bios(tc);
2065
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002066 bio_list_merge(&bios, &tc->deferred_bio_list);
2067 bio_list_init(&tc->deferred_bio_list);
Mike Snitzer67324ea2014-03-21 18:33:41 -04002068
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002069 spin_unlock_irqrestore(&tc->lock, flags);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002070
Mike Snitzer67324ea2014-03-21 18:33:41 -04002071 blk_start_plug(&plug);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002072 while ((bio = bio_list_pop(&bios))) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00002073 /*
2074 * If we've got no free new_mapping structs, and processing
2075 * this bio might require one, we pause until there are some
2076 * prepared mappings to process.
2077 */
2078 if (ensure_next_mapping(pool)) {
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002079 spin_lock_irqsave(&tc->lock, flags);
2080 bio_list_add(&tc->deferred_bio_list, bio);
2081 bio_list_merge(&tc->deferred_bio_list, &bios);
2082 spin_unlock_irqrestore(&tc->lock, flags);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002083 break;
2084 }
Joe Thornber104655f2012-03-28 18:41:28 +01002085
2086 if (bio->bi_rw & REQ_DISCARD)
Joe Thornbere49e5822012-07-27 15:08:16 +01002087 pool->process_discard(tc, bio);
Joe Thornber104655f2012-03-28 18:41:28 +01002088 else
Joe Thornbere49e5822012-07-27 15:08:16 +01002089 pool->process_bio(tc, bio);
Joe Thornber8a01a6a2014-10-06 15:28:30 +01002090
2091 if ((count++ & 127) == 0) {
Joe Thornber7d327fe2014-10-06 15:45:59 +01002092 throttle_work_update(&pool->throttle);
Joe Thornber8a01a6a2014-10-06 15:28:30 +01002093 dm_pool_issue_prefetches(pool->pmd);
2094 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002095 }
Mike Snitzer67324ea2014-03-21 18:33:41 -04002096 blk_finish_plug(&plug);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002097}
2098
Joe Thornberac4c3f32014-10-10 16:42:10 +01002099static int cmp_cells(const void *lhs, const void *rhs)
2100{
2101 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2102 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2103
2104 BUG_ON(!lhs_cell->holder);
2105 BUG_ON(!rhs_cell->holder);
2106
2107 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2108 return -1;
2109
2110 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2111 return 1;
2112
2113 return 0;
2114}
2115
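/*
 * Move up to CELL_SORT_ARRAY_SIZE cells from @cells into the pool's sort
 * array, ordered by holder bio sector.  Returns the number taken.
 */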
2116static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2117{
2118 unsigned count = 0;
2119 struct dm_bio_prison_cell *cell, *tmp;
2120
2121 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2122 if (count >= CELL_SORT_ARRAY_SIZE)
2123 break;
2124
2125 pool->cell_sort_array[count++] = cell;
2126 list_del(&cell->user_list);
2127 }
2128
2129 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2130
2131 return count;
2132}
2133
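/*
 * Drain a thin device's deferred cells in sorted batches.  If we run out
 * of mapping structs the remaining cells are pushed back onto the list
 * for the next worker iteration.
 */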
Joe Thornbera374bb22014-10-10 13:43:14 +01002134static void process_thin_deferred_cells(struct thin_c *tc)
2135{
2136 struct pool *pool = tc->pool;
2137 unsigned long flags;
2138 struct list_head cells;
Joe Thornberac4c3f32014-10-10 16:42:10 +01002139 struct dm_bio_prison_cell *cell;
2140 unsigned i, j, count;
Joe Thornbera374bb22014-10-10 13:43:14 +01002141
2142 INIT_LIST_HEAD(&cells);
2143
2144 spin_lock_irqsave(&tc->lock, flags);
2145 list_splice_init(&tc->deferred_cells, &cells);
2146 spin_unlock_irqrestore(&tc->lock, flags);
2147
2148 if (list_empty(&cells))
2149 return;
2150
Joe Thornberac4c3f32014-10-10 16:42:10 +01002151 do {
2152 count = sort_cells(tc->pool, &cells);
Joe Thornbera374bb22014-10-10 13:43:14 +01002153
Joe Thornberac4c3f32014-10-10 16:42:10 +01002154 for (i = 0; i < count; i++) {
2155 cell = pool->cell_sort_array[i];
2156 BUG_ON(!cell->holder);
2157
2158 /*
2159 * If we've got no free new_mapping structs, and processing
2160 * this bio might require one, we pause until there are some
2161 * prepared mappings to process.
2162 */
2163 if (ensure_next_mapping(pool)) {
2164 for (j = i; j < count; j++)
2165 list_add(&pool->cell_sort_array[j]->user_list, &cells);
2166
2167 spin_lock_irqsave(&tc->lock, flags);
2168 list_splice(&cells, &tc->deferred_cells);
2169 spin_unlock_irqrestore(&tc->lock, flags);
2170 return;
2171 }
2172
2173 if (cell->holder->bi_rw & REQ_DISCARD)
2174 pool->process_discard_cell(tc, cell);
2175 else
2176 pool->process_cell(tc, cell);
Joe Thornbera374bb22014-10-10 13:43:14 +01002177 }
Joe Thornberac4c3f32014-10-10 16:42:10 +01002178 } while (!list_empty(&cells));
Joe Thornbera374bb22014-10-10 13:43:14 +01002179}
2180
Joe Thornberb10ebd32014-04-08 11:29:01 +01002181static void thin_get(struct thin_c *tc);
2182static void thin_put(struct thin_c *tc);
2183
2184/*
2185 * We can't hold rcu_read_lock() around code that can block. So we
2186 * find a thin with the rcu lock held; bump a refcount; then drop
2187 * the lock.
2188 */
2189static struct thin_c *get_first_thin(struct pool *pool)
2190{
2191 struct thin_c *tc = NULL;
2192
2193 rcu_read_lock();
2194 if (!list_empty(&pool->active_thins)) {
2195 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2196 thin_get(tc);
2197 }
2198 rcu_read_unlock();
2199
2200 return tc;
2201}
2202
2203static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2204{
2205 struct thin_c *old_tc = tc;
2206
2207 rcu_read_lock();
2208 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2209 thin_get(tc);
2210 thin_put(old_tc);
2211 rcu_read_unlock();
2212 return tc;
2213 }
2214 thin_put(old_tc);
2215 rcu_read_unlock();
2216
2217 return NULL;
2218}
2219
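/*
 * One worker pass over the pool: process every active thin's deferred
 * cells and bios, then commit the metadata before issuing any deferred
 * flush bios.
 */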
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002220static void process_deferred_bios(struct pool *pool)
2221{
2222 unsigned long flags;
2223 struct bio *bio;
2224 struct bio_list bios;
2225 struct thin_c *tc;
2226
Joe Thornberb10ebd32014-04-08 11:29:01 +01002227 tc = get_first_thin(pool);
2228 while (tc) {
Joe Thornbera374bb22014-10-10 13:43:14 +01002229 process_thin_deferred_cells(tc);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002230 process_thin_deferred_bios(tc);
Joe Thornberb10ebd32014-04-08 11:29:01 +01002231 tc = get_next_thin(pool, tc);
2232 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002233
2234 /*
2235 * If there are any deferred flush bios, we must commit
2236 * the metadata before issuing them.
2237 */
2238 bio_list_init(&bios);
2239 spin_lock_irqsave(&pool->lock, flags);
2240 bio_list_merge(&bios, &pool->deferred_flush_bios);
2241 bio_list_init(&pool->deferred_flush_bios);
2242 spin_unlock_irqrestore(&pool->lock, flags);
2243
Mike Snitzer4d1662a2014-02-06 06:08:56 -05002244 if (bio_list_empty(&bios) &&
2245 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
Joe Thornber991d9fa2011-10-31 20:21:18 +00002246 return;
2247
Joe Thornber020cc3b2013-12-04 15:05:36 -05002248 if (commit(pool)) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00002249 while ((bio = bio_list_pop(&bios)))
2250 bio_io_error(bio);
2251 return;
2252 }
Joe Thornber905e51b2012-03-28 18:41:27 +01002253 pool->last_commit_jiffies = jiffies;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002254
2255 while ((bio = bio_list_pop(&bios)))
2256 generic_make_request(bio);
2257}
2258
2259static void do_worker(struct work_struct *ws)
2260{
2261 struct pool *pool = container_of(ws, struct pool, worker);
2262
Joe Thornber7d327fe2014-10-06 15:45:59 +01002263 throttle_work_start(&pool->throttle);
Joe Thornber8a01a6a2014-10-06 15:28:30 +01002264 dm_pool_issue_prefetches(pool->pmd);
Joe Thornber7d327fe2014-10-06 15:45:59 +01002265 throttle_work_update(&pool->throttle);
Joe Thornbere49e5822012-07-27 15:08:16 +01002266 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
Joe Thornber7d327fe2014-10-06 15:45:59 +01002267 throttle_work_update(&pool->throttle);
Joe Thornbere49e5822012-07-27 15:08:16 +01002268 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
Joe Thornber7d327fe2014-10-06 15:45:59 +01002269 throttle_work_update(&pool->throttle);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002270 process_deferred_bios(pool);
Joe Thornber7d327fe2014-10-06 15:45:59 +01002271 throttle_work_complete(&pool->throttle);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002272}
2273
Joe Thornber905e51b2012-03-28 18:41:27 +01002274/*
2275 * We want to commit periodically so that not too much
2276 * unwritten data builds up.
2277 */
2278static void do_waker(struct work_struct *ws)
2279{
2280 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2281 wake_worker(pool);
2282 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2283}
2284
Joe Thornber85ad643b2014-05-09 15:59:38 +01002285/*
2286 * We're holding onto IO to allow userland time to react. After the
2287 * timeout either the pool will have been resized (and thus back in
2288 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
2289 */
2290static void do_no_space_timeout(struct work_struct *ws)
2291{
2292 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2293 no_space_timeout);
2294
2295 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
2296 set_pool_mode(pool, PM_READ_ONLY);
2297}
2298
Joe Thornber991d9fa2011-10-31 20:21:18 +00002299/*----------------------------------------------------------------*/
2300
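/*
 * pool_work runs a function synchronously on the pool's workqueue: queue
 * the work item and wait for it to complete.  noflush_work builds on this
 * to flip a thin device's requeue_mode from the worker thread, e.g.:
 *
 *	noflush_work(tc, do_noflush_start);
 *	...
 *	noflush_work(tc, do_noflush_stop);
 */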
Joe Thornbere7a3e872014-05-13 16:14:14 -04002301struct pool_work {
Joe Thornber738211f2014-03-03 15:52:28 +00002302 struct work_struct worker;
Joe Thornbere7a3e872014-05-13 16:14:14 -04002303 struct completion complete;
Joe Thornber738211f2014-03-03 15:52:28 +00002304};
2305
Joe Thornbere7a3e872014-05-13 16:14:14 -04002306static struct pool_work *to_pool_work(struct work_struct *ws)
Joe Thornber738211f2014-03-03 15:52:28 +00002307{
Joe Thornbere7a3e872014-05-13 16:14:14 -04002308 return container_of(ws, struct pool_work, worker);
2309}
2310
2311static void pool_work_complete(struct pool_work *pw)
2312{
2313 complete(&pw->complete);
2314}
2315
2316static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2317 void (*fn)(struct work_struct *))
2318{
2319 INIT_WORK_ONSTACK(&pw->worker, fn);
2320 init_completion(&pw->complete);
2321 queue_work(pool->wq, &pw->worker);
2322 wait_for_completion(&pw->complete);
2323}
2324
2325/*----------------------------------------------------------------*/
2326
2327struct noflush_work {
2328 struct pool_work pw;
2329 struct thin_c *tc;
2330};
2331
2332static struct noflush_work *to_noflush(struct work_struct *ws)
2333{
2334 return container_of(to_pool_work(ws), struct noflush_work, pw);
Joe Thornber738211f2014-03-03 15:52:28 +00002335}
2336
2337static void do_noflush_start(struct work_struct *ws)
2338{
Joe Thornbere7a3e872014-05-13 16:14:14 -04002339 struct noflush_work *w = to_noflush(ws);
Joe Thornber738211f2014-03-03 15:52:28 +00002340 w->tc->requeue_mode = true;
2341 requeue_io(w->tc);
Joe Thornbere7a3e872014-05-13 16:14:14 -04002342 pool_work_complete(&w->pw);
Joe Thornber738211f2014-03-03 15:52:28 +00002343}
2344
2345static void do_noflush_stop(struct work_struct *ws)
2346{
Joe Thornbere7a3e872014-05-13 16:14:14 -04002347 struct noflush_work *w = to_noflush(ws);
Joe Thornber738211f2014-03-03 15:52:28 +00002348 w->tc->requeue_mode = false;
Joe Thornbere7a3e872014-05-13 16:14:14 -04002349 pool_work_complete(&w->pw);
Joe Thornber738211f2014-03-03 15:52:28 +00002350}
2351
2352static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2353{
2354 struct noflush_work w;
2355
Joe Thornber738211f2014-03-03 15:52:28 +00002356 w.tc = tc;
Joe Thornbere7a3e872014-05-13 16:14:14 -04002357 pool_work_wait(&w.pw, tc->pool, fn);
Joe Thornber738211f2014-03-03 15:52:28 +00002358}
2359
2360/*----------------------------------------------------------------*/
2361
Joe Thornbere49e5822012-07-27 15:08:16 +01002362static enum pool_mode get_pool_mode(struct pool *pool)
2363{
2364 return pool->pf.mode;
2365}
2366
Joe Thornber3e1a0692014-03-03 16:03:26 +00002367static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2368{
2369 dm_table_event(pool->ti->table);
2370 DMINFO("%s: switching pool to %s mode",
2371 dm_device_name(pool->pool_md), new_mode);
2372}
2373
Joe Thornber34fbcf62015-04-16 12:58:35 +01002374static bool passdown_enabled(struct pool_c *pt)
2375{
2376 return pt->adjusted_pf.discard_passdown;
2377}
2378
2379static void set_discard_callbacks(struct pool *pool)
2380{
2381 struct pool_c *pt = pool->ti->private;
2382
2383 if (passdown_enabled(pt)) {
2384 pool->process_discard_cell = process_discard_cell_passdown;
2385 pool->process_prepared_discard = process_prepared_discard_passdown;
2386 } else {
2387 pool->process_discard_cell = process_discard_cell_no_passdown;
2388 pool->process_prepared_discard = process_prepared_discard_no_passdown;
2389 }
2390}
2391
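/*
 * Switch the pool's processing callbacks to match @new_mode.  The
 * transition is downgraded if the metadata needs checking, and a pool
 * that has hit PM_FAIL never leaves it here.
 */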
Mike Snitzer8b64e882013-12-20 14:27:28 -05002392static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
Joe Thornbere49e5822012-07-27 15:08:16 +01002393{
Mike Snitzercdc2b412014-02-14 18:10:55 -05002394 struct pool_c *pt = pool->ti->private;
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002395 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2396 enum pool_mode old_mode = get_pool_mode(pool);
Mike Snitzer80c57892014-05-20 13:38:33 -04002397 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002398
2399 /*
2400 * Never allow the pool to transition to PM_WRITE mode if user
2401 * intervention is required to verify metadata and data consistency.
2402 */
2403 if (new_mode == PM_WRITE && needs_check) {
2404 DMERR("%s: unable to switch pool to write mode until repaired.",
2405 dm_device_name(pool->pool_md));
2406 if (old_mode != new_mode)
2407 new_mode = old_mode;
2408 else
2409 new_mode = PM_READ_ONLY;
2410 }
2411 /*
2412 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2413 * not going to recover without a thin_repair. So we never let the
2414 * pool move out of the old mode.
2415 */
2416 if (old_mode == PM_FAIL)
2417 new_mode = old_mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01002418
Mike Snitzer8b64e882013-12-20 14:27:28 -05002419 switch (new_mode) {
Joe Thornbere49e5822012-07-27 15:08:16 +01002420 case PM_FAIL:
Mike Snitzer8b64e882013-12-20 14:27:28 -05002421 if (old_mode != new_mode)
Joe Thornber3e1a0692014-03-03 16:03:26 +00002422 notify_of_pool_mode_change(pool, "failure");
Joe Thornber5383ef32013-12-04 16:30:01 -05002423 dm_pool_metadata_read_only(pool->pmd);
Joe Thornbere49e5822012-07-27 15:08:16 +01002424 pool->process_bio = process_bio_fail;
2425 pool->process_discard = process_bio_fail;
Joe Thornbera374bb22014-10-10 13:43:14 +01002426 pool->process_cell = process_cell_fail;
2427 pool->process_discard_cell = process_cell_fail;
Joe Thornbere49e5822012-07-27 15:08:16 +01002428 pool->process_prepared_mapping = process_prepared_mapping_fail;
2429 pool->process_prepared_discard = process_prepared_discard_fail;
Joe Thornber3e1a0692014-03-03 16:03:26 +00002430
2431 error_retry_list(pool);
Joe Thornbere49e5822012-07-27 15:08:16 +01002432 break;
2433
2434 case PM_READ_ONLY:
Mike Snitzer8b64e882013-12-20 14:27:28 -05002435 if (old_mode != new_mode)
Joe Thornber3e1a0692014-03-03 16:03:26 +00002436 notify_of_pool_mode_change(pool, "read-only");
2437 dm_pool_metadata_read_only(pool->pmd);
2438 pool->process_bio = process_bio_read_only;
2439 pool->process_discard = process_bio_success;
Joe Thornbera374bb22014-10-10 13:43:14 +01002440 pool->process_cell = process_cell_read_only;
2441 pool->process_discard_cell = process_cell_success;
Joe Thornber3e1a0692014-03-03 16:03:26 +00002442 pool->process_prepared_mapping = process_prepared_mapping_fail;
Joe Thornber34fbcf62015-04-16 12:58:35 +01002443 pool->process_prepared_discard = process_prepared_discard_success;
Joe Thornber3e1a0692014-03-03 16:03:26 +00002444
2445 error_retry_list(pool);
2446 break;
2447
2448 case PM_OUT_OF_DATA_SPACE:
2449 /*
2450 * Ideally we'd never hit this state; the low water mark
2451 * would trigger userland to extend the pool before we
2452 * completely run out of data space. However, many small
2453 * IOs to unprovisioned space can consume data space at an
2454 * alarming rate. Adjust your low water mark if you're
2455 * frequently seeing this mode.
2456 */
2457 if (old_mode != new_mode)
2458 notify_of_pool_mode_change(pool, "out-of-data-space");
2459 pool->process_bio = process_bio_read_only;
Joe Thornbera374bb22014-10-10 13:43:14 +01002460 pool->process_discard = process_discard_bio;
2461 pool->process_cell = process_cell_read_only;
Joe Thornber3e1a0692014-03-03 16:03:26 +00002462 pool->process_prepared_mapping = process_prepared_mapping;
Joe Thornber34fbcf62015-04-16 12:58:35 +01002463 set_discard_callbacks(pool);
Joe Thornber85ad643b2014-05-09 15:59:38 +01002464
Mike Snitzer80c57892014-05-20 13:38:33 -04002465 if (!pool->pf.error_if_no_space && no_space_timeout)
2466 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
Joe Thornbere49e5822012-07-27 15:08:16 +01002467 break;
2468
2469 case PM_WRITE:
Mike Snitzer8b64e882013-12-20 14:27:28 -05002470 if (old_mode != new_mode)
Joe Thornber3e1a0692014-03-03 16:03:26 +00002471 notify_of_pool_mode_change(pool, "write");
Joe Thornber9b7aaa62013-12-04 16:58:19 -05002472 dm_pool_metadata_read_write(pool->pmd);
Joe Thornbere49e5822012-07-27 15:08:16 +01002473 pool->process_bio = process_bio;
Joe Thornbera374bb22014-10-10 13:43:14 +01002474 pool->process_discard = process_discard_bio;
2475 pool->process_cell = process_cell;
Joe Thornbere49e5822012-07-27 15:08:16 +01002476 pool->process_prepared_mapping = process_prepared_mapping;
Joe Thornber34fbcf62015-04-16 12:58:35 +01002477 set_discard_callbacks(pool);
Joe Thornbere49e5822012-07-27 15:08:16 +01002478 break;
2479 }
Mike Snitzer8b64e882013-12-20 14:27:28 -05002480
2481 pool->pf.mode = new_mode;
Mike Snitzercdc2b412014-02-14 18:10:55 -05002482 /*
2483 * The pool mode may have changed, sync it so bind_control_target()
2484 * doesn't cause an unexpected mode transition on resume.
2485 */
2486 pt->adjusted_pf.mode = new_mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01002487}
2488
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002489static void abort_transaction(struct pool *pool)
2490{
2491 const char *dev_name = dm_device_name(pool->pool_md);
2492
2493 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2494 if (dm_pool_abort_metadata(pool->pmd)) {
2495 DMERR("%s: failed to abort metadata transaction", dev_name);
2496 set_pool_mode(pool, PM_FAIL);
2497 }
2498
2499 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2500 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2501 set_pool_mode(pool, PM_FAIL);
2502 }
2503}
2504
Joe Thornberb5330652013-12-04 19:51:33 -05002505static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2506{
2507 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2508 dm_device_name(pool->pool_md), op, r);
2509
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002510 abort_transaction(pool);
Joe Thornberb5330652013-12-04 19:51:33 -05002511 set_pool_mode(pool, PM_READ_ONLY);
2512}
2513
Joe Thornbere49e5822012-07-27 15:08:16 +01002514/*----------------------------------------------------------------*/
2515
Joe Thornber991d9fa2011-10-31 20:21:18 +00002516/*
2517 * Mapping functions.
2518 */
2519
2520/*
2521 * Called only while mapping a thin bio to hand it over to the workqueue.
2522 */
2523static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2524{
2525 unsigned long flags;
2526 struct pool *pool = tc->pool;
2527
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002528 spin_lock_irqsave(&tc->lock, flags);
2529 bio_list_add(&tc->deferred_bio_list, bio);
2530 spin_unlock_irqrestore(&tc->lock, flags);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002531
2532 wake_worker(pool);
2533}
2534
Joe Thornber7d327fe2014-10-06 15:45:59 +01002535static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2536{
2537 struct pool *pool = tc->pool;
2538
2539 throttle_lock(&pool->throttle);
2540 thin_defer_bio(tc, bio);
2541 throttle_unlock(&pool->throttle);
2542}
2543
Joe Thornbera374bb22014-10-10 13:43:14 +01002544static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2545{
2546 unsigned long flags;
2547 struct pool *pool = tc->pool;
2548
2549 throttle_lock(&pool->throttle);
2550 spin_lock_irqsave(&tc->lock, flags);
2551 list_add_tail(&cell->user_list, &tc->deferred_cells);
2552 spin_unlock_irqrestore(&tc->lock, flags);
2553 throttle_unlock(&pool->throttle);
2554
2555 wake_worker(pool);
2556}
2557
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00002558static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
Joe Thornbereb2aa482012-03-28 18:41:28 +01002559{
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00002560 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornbereb2aa482012-03-28 18:41:28 +01002561
2562 h->tc = tc;
2563 h->shared_read_entry = NULL;
Joe Thornbere8088072012-12-21 20:23:31 +00002564 h->all_io_entry = NULL;
Joe Thornbereb2aa482012-03-28 18:41:28 +01002565 h->overwrite_mapping = NULL;
Joe Thornber34fbcf62015-04-16 12:58:35 +01002566 h->cell = NULL;
Joe Thornbereb2aa482012-03-28 18:41:28 +01002567}
2568
Joe Thornber991d9fa2011-10-31 20:21:18 +00002569/*
2570 * Non-blocking function called from the thin target's map function.
2571 */
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002572static int thin_bio_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002573{
2574 int r;
2575 struct thin_c *tc = ti->private;
2576 dm_block_t block = get_bio_block(tc, bio);
2577 struct dm_thin_device *td = tc->td;
2578 struct dm_thin_lookup_result result;
Joe Thornbera374bb22014-10-10 13:43:14 +01002579 struct dm_bio_prison_cell *virt_cell, *data_cell;
Joe Thornbere8088072012-12-21 20:23:31 +00002580 struct dm_cell_key key;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002581
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00002582 thin_hook_bio(tc, bio);
Joe Thornbere49e5822012-07-27 15:08:16 +01002583
Joe Thornber738211f2014-03-03 15:52:28 +00002584 if (tc->requeue_mode) {
2585 bio_endio(bio, DM_ENDIO_REQUEUE);
2586 return DM_MAPIO_SUBMITTED;
2587 }
2588
Joe Thornbere49e5822012-07-27 15:08:16 +01002589 if (get_pool_mode(tc->pool) == PM_FAIL) {
2590 bio_io_error(bio);
2591 return DM_MAPIO_SUBMITTED;
2592 }
2593
Joe Thornber104655f2012-03-28 18:41:28 +01002594 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
Joe Thornber7d327fe2014-10-06 15:45:59 +01002595 thin_defer_bio_with_throttle(tc, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002596 return DM_MAPIO_SUBMITTED;
2597 }
2598
Joe Thornberc822ed92014-10-10 09:41:09 +01002599 /*
2600 * We must hold the virtual cell before doing the lookup, otherwise
2601 * there's a race with discard.
2602 */
2603 build_virtual_key(tc->td, block, &key);
Joe Thornbera374bb22014-10-10 13:43:14 +01002604 if (bio_detain(tc->pool, &key, bio, &virt_cell))
Joe Thornberc822ed92014-10-10 09:41:09 +01002605 return DM_MAPIO_SUBMITTED;
2606
Joe Thornber991d9fa2011-10-31 20:21:18 +00002607 r = dm_thin_find_block(td, block, 0, &result);
2608
2609 /*
2610 * Note that we defer readahead too.
2611 */
2612 switch (r) {
2613 case 0:
2614 if (unlikely(result.shared)) {
2615 /*
2616 * We have a race condition here between the
2617 * result.shared value returned by the lookup and
2618 * snapshot creation, which may cause new
2619 * sharing.
2620 *
2621 * To avoid this always quiesce the origin before
2622 * taking the snap. You want to do this anyway to
2623 * ensure a consistent application view
2624 * (i.e. lockfs).
2625 *
2626 * More distant ancestors are irrelevant. The
2627 * shared flag will be set in their case.
2628 */
Joe Thornbera374bb22014-10-10 13:43:14 +01002629 thin_defer_cell(tc, virt_cell);
Joe Thornbere8088072012-12-21 20:23:31 +00002630 return DM_MAPIO_SUBMITTED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002631 }
Joe Thornbere8088072012-12-21 20:23:31 +00002632
Joe Thornbere8088072012-12-21 20:23:31 +00002633 build_data_key(tc->td, result.block, &key);
Joe Thornbera374bb22014-10-10 13:43:14 +01002634 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2635 cell_defer_no_holder(tc, virt_cell);
Joe Thornbere8088072012-12-21 20:23:31 +00002636 return DM_MAPIO_SUBMITTED;
2637 }
2638
2639 inc_all_io_entry(tc->pool, bio);
Joe Thornbera374bb22014-10-10 13:43:14 +01002640 cell_defer_no_holder(tc, data_cell);
2641 cell_defer_no_holder(tc, virt_cell);
Joe Thornbere8088072012-12-21 20:23:31 +00002642
2643 remap(tc, bio, result.block);
2644 return DM_MAPIO_REMAPPED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002645
2646 case -ENODATA:
Joe Thornbere49e5822012-07-27 15:08:16 +01002647 case -EWOULDBLOCK:
Joe Thornbera374bb22014-10-10 13:43:14 +01002648 thin_defer_cell(tc, virt_cell);
Joe Thornber2aab3852012-12-21 20:23:33 +00002649 return DM_MAPIO_SUBMITTED;
Joe Thornbere49e5822012-07-27 15:08:16 +01002650
2651 default:
2652 /*
2653 * Must always call bio_io_error on failure.
2654 * dm_thin_find_block can fail with -EINVAL if the
2655 * pool is switched to fail-io mode.
2656 */
2657 bio_io_error(bio);
Joe Thornbera374bb22014-10-10 13:43:14 +01002658 cell_defer_no_holder(tc, virt_cell);
Joe Thornber2aab3852012-12-21 20:23:33 +00002659 return DM_MAPIO_SUBMITTED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002660 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002661}
2662
2663static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2664{
Joe Thornber991d9fa2011-10-31 20:21:18 +00002665 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
Mike Snitzer760fe672014-03-20 08:36:47 -04002666 struct request_queue *q;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002667
Mike Snitzer760fe672014-03-20 08:36:47 -04002668 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2669 return 1;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002670
Mike Snitzer760fe672014-03-20 08:36:47 -04002671 q = bdev_get_queue(pt->data_dev->bdev);
2672 return bdi_congested(&q->backing_dev_info, bdi_bits);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002673}
2674
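/*
 * Splice each thin device's retry_on_resume list back onto its deferred
 * bio list so the worker will reprocess those bios (typically called
 * when the pool is resumed with more space).
 */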
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002675static void requeue_bios(struct pool *pool)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002676{
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002677 unsigned long flags;
2678 struct thin_c *tc;
2679
2680 rcu_read_lock();
2681 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2682 spin_lock_irqsave(&tc->lock, flags);
2683 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2684 bio_list_init(&tc->retry_on_resume_list);
2685 spin_unlock_irqrestore(&tc->lock, flags);
2686 }
2687 rcu_read_unlock();
Joe Thornber991d9fa2011-10-31 20:21:18 +00002688}
2689
2690/*----------------------------------------------------------------
2691 * Binding of control targets to a pool object
2692 *--------------------------------------------------------------*/
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002693static bool data_dev_supports_discard(struct pool_c *pt)
2694{
2695 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2696
2697 return q && blk_queue_discard(q);
2698}
2699
Joe Thornber58051b92013-03-20 17:21:25 +00002700static bool is_factor(sector_t block_size, uint32_t n)
2701{
2702 return !sector_div(block_size, n);
2703}
2704
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002705/*
2706 * If discard_passdown was enabled verify that the data device
Mike Snitzer0424caa2012-09-26 23:45:47 +01002707 * supports discards. Disable discard_passdown if not.
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002708 */
Mike Snitzer0424caa2012-09-26 23:45:47 +01002709static void disable_passdown_if_not_supported(struct pool_c *pt)
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002710{
Mike Snitzer0424caa2012-09-26 23:45:47 +01002711 struct pool *pool = pt->pool;
2712 struct block_device *data_bdev = pt->data_dev->bdev;
2713 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
Mike Snitzer0424caa2012-09-26 23:45:47 +01002714 const char *reason = NULL;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002715 char buf[BDEVNAME_SIZE];
2716
Mike Snitzer0424caa2012-09-26 23:45:47 +01002717 if (!pt->adjusted_pf.discard_passdown)
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002718 return;
2719
Mike Snitzer0424caa2012-09-26 23:45:47 +01002720 if (!data_dev_supports_discard(pt))
2721 reason = "discard unsupported";
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002722
Mike Snitzer0424caa2012-09-26 23:45:47 +01002723 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2724 reason = "max discard sectors smaller than a block";
2725
Mike Snitzer0424caa2012-09-26 23:45:47 +01002726 if (reason) {
2727 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2728 pt->adjusted_pf.discard_passdown = false;
2729 }
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002730}
2731
Joe Thornber991d9fa2011-10-31 20:21:18 +00002732static int bind_control_target(struct pool *pool, struct dm_target *ti)
2733{
2734 struct pool_c *pt = ti->private;
2735
Joe Thornbere49e5822012-07-27 15:08:16 +01002736 /*
Joe Thornber9b7aaa62013-12-04 16:58:19 -05002737 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
Joe Thornbere49e5822012-07-27 15:08:16 +01002738 */
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002739 enum pool_mode old_mode = get_pool_mode(pool);
Mike Snitzer0424caa2012-09-26 23:45:47 +01002740 enum pool_mode new_mode = pt->adjusted_pf.mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01002741
Joe Thornber9b7aaa62013-12-04 16:58:19 -05002742 /*
Mike Snitzer8b64e882013-12-20 14:27:28 -05002743 * Don't change the pool's mode until set_pool_mode() below.
2744 * Otherwise the pool's process_* function pointers may
2745 * not match the desired pool mode.
2746 */
2747 pt->adjusted_pf.mode = old_mode;
2748
2749 pool->ti = ti;
2750 pool->pf = pt->adjusted_pf;
2751 pool->low_water_blocks = pt->low_water_blocks;
2752
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002753 set_pool_mode(pool, new_mode);
Mike Snitzerf4026932012-05-19 01:01:01 +01002754
Joe Thornber991d9fa2011-10-31 20:21:18 +00002755 return 0;
2756}
2757
2758static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2759{
2760 if (pool->ti == ti)
2761 pool->ti = NULL;
2762}
2763
2764/*----------------------------------------------------------------
2765 * Pool creation
2766 *--------------------------------------------------------------*/
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002767/* Initialize pool features. */
2768static void pool_features_init(struct pool_features *pf)
2769{
Joe Thornbere49e5822012-07-27 15:08:16 +01002770 pf->mode = PM_WRITE;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002771 pf->zero_new_blocks = true;
2772 pf->discard_enabled = true;
2773 pf->discard_passdown = true;
Mike Snitzer787a996c2013-12-06 16:21:43 -05002774 pf->error_if_no_space = false;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002775}
2776
Joe Thornber991d9fa2011-10-31 20:21:18 +00002777static void __pool_destroy(struct pool *pool)
2778{
2779 __pool_table_remove(pool);
2780
Joe Thornbera822c832015-07-03 10:22:42 +01002781 vfree(pool->cell_sort_array);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002782 if (dm_pool_metadata_close(pool->pmd) < 0)
2783 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2784
Mike Snitzer44feb382012-10-12 21:02:10 +01002785 dm_bio_prison_destroy(pool->prison);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002786 dm_kcopyd_client_destroy(pool->copier);
2787
2788 if (pool->wq)
2789 destroy_workqueue(pool->wq);
2790
2791 if (pool->next_mapping)
2792 mempool_free(pool->next_mapping, pool->mapping_pool);
2793 mempool_destroy(pool->mapping_pool);
Mike Snitzer44feb382012-10-12 21:02:10 +01002794 dm_deferred_set_destroy(pool->shared_read_ds);
2795 dm_deferred_set_destroy(pool->all_io_ds);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002796 kfree(pool);
2797}
2798
Mike Snitzera24c2562012-06-03 00:30:00 +01002799static struct kmem_cache *_new_mapping_cache;
Mike Snitzera24c2562012-06-03 00:30:00 +01002800
Joe Thornber991d9fa2011-10-31 20:21:18 +00002801static struct pool *pool_create(struct mapped_device *pool_md,
2802 struct block_device *metadata_dev,
Joe Thornbere49e5822012-07-27 15:08:16 +01002803 unsigned long block_size,
2804 int read_only, char **error)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002805{
2806 int r;
2807 void *err_p;
2808 struct pool *pool;
2809 struct dm_pool_metadata *pmd;
Joe Thornbere49e5822012-07-27 15:08:16 +01002810 bool format_device = read_only ? false : true;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002811
Joe Thornbere49e5822012-07-27 15:08:16 +01002812 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002813 if (IS_ERR(pmd)) {
2814 *error = "Error creating metadata object";
2815 return (struct pool *)pmd;
2816 }
2817
2818 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2819 if (!pool) {
2820 *error = "Error allocating memory for pool";
2821 err_p = ERR_PTR(-ENOMEM);
2822 goto bad_pool;
2823 }
2824
2825 pool->pmd = pmd;
2826 pool->sectors_per_block = block_size;
Mikulas Patockaf9a8e0c2012-07-27 15:08:03 +01002827 if (block_size & (block_size - 1))
2828 pool->sectors_per_block_shift = -1;
2829 else
2830 pool->sectors_per_block_shift = __ffs(block_size);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002831 pool->low_water_blocks = 0;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002832 pool_features_init(&pool->pf);
Joe Thornbera195db22014-10-06 16:30:06 -04002833 pool->prison = dm_bio_prison_create();
Joe Thornber991d9fa2011-10-31 20:21:18 +00002834 if (!pool->prison) {
2835 *error = "Error creating pool's bio prison";
2836 err_p = ERR_PTR(-ENOMEM);
2837 goto bad_prison;
2838 }
2839
Mikulas Patockadf5d2e92013-03-01 22:45:49 +00002840 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002841 if (IS_ERR(pool->copier)) {
2842 r = PTR_ERR(pool->copier);
2843 *error = "Error creating pool's kcopyd client";
2844 err_p = ERR_PTR(r);
2845 goto bad_kcopyd_client;
2846 }
2847
2848 /*
2849	 * Create a single-threaded workqueue that will service all devices
2850 * that use this metadata.
2851 */
2852 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2853 if (!pool->wq) {
2854 *error = "Error creating pool's workqueue";
2855 err_p = ERR_PTR(-ENOMEM);
2856 goto bad_wq;
2857 }
2858
Joe Thornber7d327fe2014-10-06 15:45:59 +01002859 throttle_init(&pool->throttle);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002860 INIT_WORK(&pool->worker, do_worker);
Joe Thornber905e51b2012-03-28 18:41:27 +01002861 INIT_DELAYED_WORK(&pool->waker, do_waker);
Joe Thornber85ad643b2014-05-09 15:59:38 +01002862 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002863 spin_lock_init(&pool->lock);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002864 bio_list_init(&pool->deferred_flush_bios);
2865 INIT_LIST_HEAD(&pool->prepared_mappings);
Joe Thornber104655f2012-03-28 18:41:28 +01002866 INIT_LIST_HEAD(&pool->prepared_discards);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04002867 INIT_LIST_HEAD(&pool->active_thins);
Joe Thornber88a66212013-12-04 20:16:12 -05002868 pool->low_water_triggered = false;
Mike Snitzer80e96c52014-11-07 15:09:46 -05002869 pool->suspended = true;
Mike Snitzer44feb382012-10-12 21:02:10 +01002870
2871 pool->shared_read_ds = dm_deferred_set_create();
2872 if (!pool->shared_read_ds) {
2873 *error = "Error creating pool's shared read deferred set";
2874 err_p = ERR_PTR(-ENOMEM);
2875 goto bad_shared_read_ds;
2876 }
2877
2878 pool->all_io_ds = dm_deferred_set_create();
2879 if (!pool->all_io_ds) {
2880 *error = "Error creating pool's all io deferred set";
2881 err_p = ERR_PTR(-ENOMEM);
2882 goto bad_all_io_ds;
2883 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002884
2885 pool->next_mapping = NULL;
Mike Snitzera24c2562012-06-03 00:30:00 +01002886 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2887 _new_mapping_cache);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002888 if (!pool->mapping_pool) {
2889 *error = "Error creating pool's mapping mempool";
2890 err_p = ERR_PTR(-ENOMEM);
2891 goto bad_mapping_pool;
2892 }
2893
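	/*
	 * The cell sort array is many pages of pointers, so vmalloc() is used
	 * rather than relying on a large physically contiguous kmalloc().
	 */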
Joe Thornbera822c832015-07-03 10:22:42 +01002894 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2895 if (!pool->cell_sort_array) {
2896 *error = "Error allocating cell sort array";
2897 err_p = ERR_PTR(-ENOMEM);
2898 goto bad_sort_array;
2899 }
2900
Joe Thornber991d9fa2011-10-31 20:21:18 +00002901 pool->ref_count = 1;
Joe Thornber905e51b2012-03-28 18:41:27 +01002902 pool->last_commit_jiffies = jiffies;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002903 pool->pool_md = pool_md;
2904 pool->md_dev = metadata_dev;
2905 __pool_table_insert(pool);
2906
2907 return pool;
2908
Joe Thornbera822c832015-07-03 10:22:42 +01002909bad_sort_array:
2910 mempool_destroy(pool->mapping_pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002911bad_mapping_pool:
Mike Snitzer44feb382012-10-12 21:02:10 +01002912 dm_deferred_set_destroy(pool->all_io_ds);
2913bad_all_io_ds:
2914 dm_deferred_set_destroy(pool->shared_read_ds);
2915bad_shared_read_ds:
Joe Thornber991d9fa2011-10-31 20:21:18 +00002916 destroy_workqueue(pool->wq);
2917bad_wq:
2918 dm_kcopyd_client_destroy(pool->copier);
2919bad_kcopyd_client:
Mike Snitzer44feb382012-10-12 21:02:10 +01002920 dm_bio_prison_destroy(pool->prison);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002921bad_prison:
2922 kfree(pool);
2923bad_pool:
2924 if (dm_pool_metadata_close(pmd))
2925 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2926
2927 return err_p;
2928}
2929
2930static void __pool_inc(struct pool *pool)
2931{
2932 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2933 pool->ref_count++;
2934}
2935
2936static void __pool_dec(struct pool *pool)
2937{
2938 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2939 BUG_ON(!pool->ref_count);
2940 if (!--pool->ref_count)
2941 __pool_destroy(pool);
2942}
2943
2944static struct pool *__pool_find(struct mapped_device *pool_md,
2945 struct block_device *metadata_dev,
Joe Thornbere49e5822012-07-27 15:08:16 +01002946 unsigned long block_size, int read_only,
2947 char **error, int *created)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002948{
2949 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2950
2951 if (pool) {
Mike Snitzerf09996c2012-07-27 15:07:59 +01002952 if (pool->pool_md != pool_md) {
2953 *error = "metadata device already in use by a pool";
Joe Thornber991d9fa2011-10-31 20:21:18 +00002954 return ERR_PTR(-EBUSY);
Mike Snitzerf09996c2012-07-27 15:07:59 +01002955 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002956 __pool_inc(pool);
2957
2958 } else {
2959 pool = __pool_table_lookup(pool_md);
2960 if (pool) {
Mike Snitzerf09996c2012-07-27 15:07:59 +01002961 if (pool->md_dev != metadata_dev) {
2962 *error = "different pool cannot replace a pool";
Joe Thornber991d9fa2011-10-31 20:21:18 +00002963 return ERR_PTR(-EINVAL);
Mike Snitzerf09996c2012-07-27 15:07:59 +01002964 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002965 __pool_inc(pool);
2966
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002967 } else {
Joe Thornbere49e5822012-07-27 15:08:16 +01002968 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002969 *created = 1;
2970 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002971 }
2972
2973 return pool;
2974}
2975
2976/*----------------------------------------------------------------
2977 * Pool target methods
2978 *--------------------------------------------------------------*/
2979static void pool_dtr(struct dm_target *ti)
2980{
2981 struct pool_c *pt = ti->private;
2982
2983 mutex_lock(&dm_thin_pool_table.mutex);
2984
2985 unbind_control_target(pt->pool, ti);
2986 __pool_dec(pt->pool);
2987 dm_put_device(ti, pt->metadata_dev);
2988 dm_put_device(ti, pt->data_dev);
2989 kfree(pt);
2990
2991 mutex_unlock(&dm_thin_pool_table.mutex);
2992}
2993
Joe Thornber991d9fa2011-10-31 20:21:18 +00002994static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2995 struct dm_target *ti)
2996{
2997 int r;
2998 unsigned argc;
2999 const char *arg_name;
3000
3001 static struct dm_arg _args[] = {
Mike Snitzer74aa45c2014-01-15 19:07:58 -05003002 {0, 4, "Invalid number of pool feature arguments"},
Joe Thornber991d9fa2011-10-31 20:21:18 +00003003 };
3004
3005 /*
3006 * No feature arguments supplied.
3007 */
3008 if (!as->argc)
3009 return 0;
3010
3011 r = dm_read_arg_group(_args, as, &argc, &ti->error);
3012 if (r)
3013 return -EINVAL;
3014
3015 while (argc && !r) {
3016 arg_name = dm_shift_arg(as);
3017 argc--;
3018
Joe Thornbere49e5822012-07-27 15:08:16 +01003019 if (!strcasecmp(arg_name, "skip_block_zeroing"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01003020 pf->zero_new_blocks = false;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003021
Joe Thornbere49e5822012-07-27 15:08:16 +01003022 else if (!strcasecmp(arg_name, "ignore_discard"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01003023 pf->discard_enabled = false;
Joe Thornbere49e5822012-07-27 15:08:16 +01003024
3025 else if (!strcasecmp(arg_name, "no_discard_passdown"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01003026 pf->discard_passdown = false;
Joe Thornbere49e5822012-07-27 15:08:16 +01003027
3028 else if (!strcasecmp(arg_name, "read_only"))
3029 pf->mode = PM_READ_ONLY;
3030
Mike Snitzer787a996c2013-12-06 16:21:43 -05003031 else if (!strcasecmp(arg_name, "error_if_no_space"))
3032 pf->error_if_no_space = true;
3033
Joe Thornbere49e5822012-07-27 15:08:16 +01003034 else {
3035 ti->error = "Unrecognised pool feature requested";
3036 r = -EINVAL;
3037 break;
3038 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003039 }
3040
3041 return r;
3042}
3043
Joe Thornberac8c3f32013-05-10 14:37:21 +01003044static void metadata_low_callback(void *context)
3045{
3046 struct pool *pool = context;
3047
3048 DMWARN("%s: reached low water mark for metadata device: sending event.",
3049 dm_device_name(pool->pool_md));
3050
3051 dm_table_event(pool->ti->table);
3052}
3053
Mike Snitzer7d489352014-02-12 23:58:15 -05003054static sector_t get_dev_size(struct block_device *bdev)
Joe Thornberb17446d2013-05-10 14:37:18 +01003055{
Mike Snitzer7d489352014-02-12 23:58:15 -05003056 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3057}
3058
3059static void warn_if_metadata_device_too_big(struct block_device *bdev)
3060{
3061 sector_t metadata_dev_size = get_dev_size(bdev);
Joe Thornberb17446d2013-05-10 14:37:18 +01003062 char buffer[BDEVNAME_SIZE];
3063
Mike Snitzer7d489352014-02-12 23:58:15 -05003064 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
Joe Thornberb17446d2013-05-10 14:37:18 +01003065 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3066 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
Mike Snitzer7d489352014-02-12 23:58:15 -05003067}
3068
3069static sector_t get_metadata_dev_size(struct block_device *bdev)
3070{
3071 sector_t metadata_dev_size = get_dev_size(bdev);
3072
3073 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3074 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
Joe Thornberb17446d2013-05-10 14:37:18 +01003075
3076 return metadata_dev_size;
3077}
3078
Joe Thornber24347e92013-05-10 14:37:19 +01003079static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3080{
3081 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3082
Mike Snitzer7d489352014-02-12 23:58:15 -05003083 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
Joe Thornber24347e92013-05-10 14:37:19 +01003084
3085 return metadata_dev_size;
3086}
3087
Joe Thornber991d9fa2011-10-31 20:21:18 +00003088/*
Joe Thornberac8c3f32013-05-10 14:37:21 +01003089 * When a metadata threshold is crossed a dm event is triggered, and
3090 * userland should respond by growing the metadata device. We could let
3091 * userland set the threshold, like we do with the data threshold, but I'm
3092 * not sure they know enough to do this well.
3093 */
3094static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3095{
3096 /*
3097 * 4M is ample for all ops with the possible exception of thin
3098 * device deletion which is harmless if it fails (just retry the
3099 * delete after you've grown the device).
3100 */
3101 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3102 return min((dm_block_t)1024ULL /* 4M */, quarter);
3103}
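/*
 * Worked example (assuming the usual 4KiB metadata block size): a 16GiB
 * metadata device holds ~4M metadata blocks, a quarter of which is ~1M
 * blocks, so the threshold clamps to 1024 blocks (4MiB).  Only a metadata
 * device smaller than roughly 16MiB ends up with a threshold below that cap.
 */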
3104
3105/*
Joe Thornber991d9fa2011-10-31 20:21:18 +00003106 * thin-pool <metadata dev> <data dev>
3107 * <data block size (sectors)>
3108 * <low water mark (blocks)>
3109 * [<#feature args> [<arg>]*]
3110 *
3111 * Optional feature arguments are:
3112 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003113 * ignore_discard: disable discard
3114 * no_discard_passdown: don't pass discards down to the data device
Mike Snitzer787a996c2013-12-06 16:21:43 -05003115 * read_only: Don't allow any changes to be made to the pool metadata.
3116 * error_if_no_space: error IOs, instead of queueing, if no space.
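 *
 * Example (hypothetical devices and sizes):
 *   dmsetup create pool --table \
 *     "0 419430400 thin-pool /dev/sdb /dev/sdc 1024 81920 1 skip_block_zeroing"
 *   i.e. 512KiB (1024-sector) data blocks, a low water mark of 81920 blocks
 *   and block zeroing disabled.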
Joe Thornber991d9fa2011-10-31 20:21:18 +00003117 */
3118static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3119{
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003120 int r, pool_created = 0;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003121 struct pool_c *pt;
3122 struct pool *pool;
3123 struct pool_features pf;
3124 struct dm_arg_set as;
3125 struct dm_dev *data_dev;
3126 unsigned long block_size;
3127 dm_block_t low_water_blocks;
3128 struct dm_dev *metadata_dev;
Joe Thornber5d0db962013-05-10 14:37:19 +01003129 fmode_t metadata_mode;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003130
3131 /*
3132 * FIXME Remove validation from scope of lock.
3133 */
3134 mutex_lock(&dm_thin_pool_table.mutex);
3135
3136 if (argc < 4) {
3137 ti->error = "Invalid argument count";
3138 r = -EINVAL;
3139 goto out_unlock;
3140 }
Joe Thornber5d0db962013-05-10 14:37:19 +01003141
Joe Thornber991d9fa2011-10-31 20:21:18 +00003142 as.argc = argc;
3143 as.argv = argv;
3144
Joe Thornber5d0db962013-05-10 14:37:19 +01003145 /*
3146 * Set default pool features.
3147 */
3148 pool_features_init(&pf);
3149
3150 dm_consume_args(&as, 4);
3151 r = parse_pool_features(&as, &pf, ti);
3152 if (r)
3153 goto out_unlock;
3154
3155 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3156 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003157 if (r) {
3158 ti->error = "Error opening metadata block device";
3159 goto out_unlock;
3160 }
Mike Snitzer7d489352014-02-12 23:58:15 -05003161 warn_if_metadata_device_too_big(metadata_dev->bdev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003162
3163 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3164 if (r) {
3165 ti->error = "Error getting data device";
3166 goto out_metadata;
3167 }
3168
3169 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3170 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3171 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003172 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00003173 ti->error = "Invalid block size";
3174 r = -EINVAL;
3175 goto out;
3176 }
3177
3178 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3179 ti->error = "Invalid low water mark";
3180 r = -EINVAL;
3181 goto out;
3182 }
3183
Joe Thornber991d9fa2011-10-31 20:21:18 +00003184 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3185 if (!pt) {
3186 r = -ENOMEM;
3187 goto out;
3188 }
3189
3190 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
Joe Thornbere49e5822012-07-27 15:08:16 +01003191 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003192 if (IS_ERR(pool)) {
3193 r = PTR_ERR(pool);
3194 goto out_free_pt;
3195 }
3196
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003197 /*
3198 * 'pool_created' reflects whether this is the first table load.
3199 * Top level discard support is not allowed to be changed after
3200 * initial load. This would require a pool reload to trigger thin
3201 * device changes.
3202 */
3203 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3204 ti->error = "Discard support cannot be disabled once enabled";
3205 r = -EINVAL;
3206 goto out_flags_changed;
3207 }
3208
Joe Thornber991d9fa2011-10-31 20:21:18 +00003209 pt->pool = pool;
3210 pt->ti = ti;
3211 pt->metadata_dev = metadata_dev;
3212 pt->data_dev = data_dev;
3213 pt->low_water_blocks = low_water_blocks;
Mike Snitzer0424caa2012-09-26 23:45:47 +01003214 pt->adjusted_pf = pt->requested_pf = pf;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00003215 ti->num_flush_bios = 1;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01003216
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003217 /*
3218 * Only need to enable discards if the pool should pass
3219 * them down to the data device. The thin device's discard
3220 * processing will cause mappings to be removed from the btree.
3221 */
Mike Snitzerb60ab992013-09-19 18:49:11 -04003222 ti->discard_zeroes_data_unsupported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003223 if (pf.discard_enabled && pf.discard_passdown) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00003224 ti->num_discard_bios = 1;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01003225
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003226 /*
3227 * Setting 'discards_supported' circumvents the normal
3228 * stacking of discard limits (this keeps the pool and
3229 * thin devices' discard limits consistent).
3230 */
Alasdair G Kergon0ac55482012-07-27 15:08:08 +01003231 ti->discards_supported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003232 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003233 ti->private = pt;
3234
Joe Thornberac8c3f32013-05-10 14:37:21 +01003235 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3236 calc_metadata_threshold(pt),
3237 metadata_low_callback,
3238 pool);
3239 if (r)
3240 goto out_free_pt;
3241
Joe Thornber991d9fa2011-10-31 20:21:18 +00003242 pt->callbacks.congested_fn = pool_is_congested;
3243 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3244
3245 mutex_unlock(&dm_thin_pool_table.mutex);
3246
3247 return 0;
3248
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003249out_flags_changed:
3250 __pool_dec(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003251out_free_pt:
3252 kfree(pt);
3253out:
3254 dm_put_device(ti, data_dev);
3255out_metadata:
3256 dm_put_device(ti, metadata_dev);
3257out_unlock:
3258 mutex_unlock(&dm_thin_pool_table.mutex);
3259
3260 return r;
3261}
3262
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00003263static int pool_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00003264{
3265 int r;
3266 struct pool_c *pt = ti->private;
3267 struct pool *pool = pt->pool;
3268 unsigned long flags;
3269
3270 /*
3271 * As this is a singleton target, ti->begin is always zero.
3272 */
3273 spin_lock_irqsave(&pool->lock, flags);
3274 bio->bi_bdev = pt->data_dev->bdev;
3275 r = DM_MAPIO_REMAPPED;
3276 spin_unlock_irqrestore(&pool->lock, flags);
3277
3278 return r;
3279}
3280
Joe Thornberb17446d2013-05-10 14:37:18 +01003281static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3282{
3283 int r;
3284 struct pool_c *pt = ti->private;
3285 struct pool *pool = pt->pool;
3286 sector_t data_size = ti->len;
3287 dm_block_t sb_data_size;
3288
3289 *need_commit = false;
3290
3291 (void) sector_div(data_size, pool->sectors_per_block);
3292
3293 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3294 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003295 DMERR("%s: failed to retrieve data device size",
3296 dm_device_name(pool->pool_md));
Joe Thornberb17446d2013-05-10 14:37:18 +01003297 return r;
3298 }
3299
3300 if (data_size < sb_data_size) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003301 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3302 dm_device_name(pool->pool_md),
Joe Thornberb17446d2013-05-10 14:37:18 +01003303 (unsigned long long)data_size, sb_data_size);
3304 return -EINVAL;
3305
3306 } else if (data_size > sb_data_size) {
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05003307 if (dm_pool_metadata_needs_check(pool->pmd)) {
3308 DMERR("%s: unable to grow the data device until repaired.",
3309 dm_device_name(pool->pool_md));
3310 return 0;
3311 }
3312
Mike Snitzer6f7f51d2013-12-04 10:25:53 -05003313 if (sb_data_size)
3314 DMINFO("%s: growing the data device from %llu to %llu blocks",
3315 dm_device_name(pool->pool_md),
3316 sb_data_size, (unsigned long long)data_size);
Joe Thornberb17446d2013-05-10 14:37:18 +01003317 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3318 if (r) {
Joe Thornberb5330652013-12-04 19:51:33 -05003319 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
Joe Thornberb17446d2013-05-10 14:37:18 +01003320 return r;
3321 }
3322
3323 *need_commit = true;
3324 }
3325
3326 return 0;
3327}
3328
Joe Thornber24347e92013-05-10 14:37:19 +01003329static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3330{
3331 int r;
3332 struct pool_c *pt = ti->private;
3333 struct pool *pool = pt->pool;
3334 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3335
3336 *need_commit = false;
3337
Alasdair G Kergon610bba82013-05-19 18:57:50 +01003338 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
Joe Thornber24347e92013-05-10 14:37:19 +01003339
3340 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3341 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003342 DMERR("%s: failed to retrieve metadata device size",
3343 dm_device_name(pool->pool_md));
Joe Thornber24347e92013-05-10 14:37:19 +01003344 return r;
3345 }
3346
3347 if (metadata_dev_size < sb_metadata_dev_size) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003348 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3349 dm_device_name(pool->pool_md),
Joe Thornber24347e92013-05-10 14:37:19 +01003350 metadata_dev_size, sb_metadata_dev_size);
3351 return -EINVAL;
3352
3353 } else if (metadata_dev_size > sb_metadata_dev_size) {
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05003354 if (dm_pool_metadata_needs_check(pool->pmd)) {
3355 DMERR("%s: unable to grow the metadata device until repaired.",
3356 dm_device_name(pool->pool_md));
3357 return 0;
3358 }
3359
Mike Snitzer7d489352014-02-12 23:58:15 -05003360 warn_if_metadata_device_too_big(pool->md_dev);
Mike Snitzer6f7f51d2013-12-04 10:25:53 -05003361 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3362 dm_device_name(pool->pool_md),
3363 sb_metadata_dev_size, metadata_dev_size);
Joe Thornber24347e92013-05-10 14:37:19 +01003364 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3365 if (r) {
Joe Thornberb5330652013-12-04 19:51:33 -05003366 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
Joe Thornber24347e92013-05-10 14:37:19 +01003367 return r;
3368 }
3369
3370 *need_commit = true;
3371 }
3372
3373 return 0;
3374}
3375
Joe Thornber991d9fa2011-10-31 20:21:18 +00003376/*
3377 * Retrieves the number of blocks of the data device from
3378 * the superblock and compares it to the actual device size,
3379 * thus resizing the data device in case it has grown.
3380 *
3381 * This both copes with opening preallocated data devices in the ctr
3382 * being followed by a resume
3383 * -and-
3384 * calling the resume method individually after userspace has
3385 * grown the data device in reaction to a table event.
3386 */
3387static int pool_preresume(struct dm_target *ti)
3388{
3389 int r;
Joe Thornber24347e92013-05-10 14:37:19 +01003390 bool need_commit1, need_commit2;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003391 struct pool_c *pt = ti->private;
3392 struct pool *pool = pt->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003393
3394 /*
3395 * Take control of the pool object.
3396 */
3397 r = bind_control_target(pool, ti);
3398 if (r)
3399 return r;
3400
Joe Thornberb17446d2013-05-10 14:37:18 +01003401 r = maybe_resize_data_dev(ti, &need_commit1);
3402 if (r)
Joe Thornber991d9fa2011-10-31 20:21:18 +00003403 return r;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003404
Joe Thornber24347e92013-05-10 14:37:19 +01003405 r = maybe_resize_metadata_dev(ti, &need_commit2);
3406 if (r)
3407 return r;
3408
3409 if (need_commit1 || need_commit2)
Joe Thornber020cc3b2013-12-04 15:05:36 -05003410 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003411
3412 return 0;
3413}
3414
Mike Snitzer583024d2014-10-28 20:58:45 -04003415static void pool_suspend_active_thins(struct pool *pool)
3416{
3417 struct thin_c *tc;
3418
3419 /* Suspend all active thin devices */
3420 tc = get_first_thin(pool);
3421 while (tc) {
3422 dm_internal_suspend_noflush(tc->thin_md);
3423 tc = get_next_thin(pool, tc);
3424 }
3425}
3426
3427static void pool_resume_active_thins(struct pool *pool)
3428{
3429 struct thin_c *tc;
3430
3431 /* Resume all active thin devices */
3432 tc = get_first_thin(pool);
3433 while (tc) {
3434 dm_internal_resume(tc->thin_md);
3435 tc = get_next_thin(pool, tc);
3436 }
3437}
3438
Joe Thornber991d9fa2011-10-31 20:21:18 +00003439static void pool_resume(struct dm_target *ti)
3440{
3441 struct pool_c *pt = ti->private;
3442 struct pool *pool = pt->pool;
3443 unsigned long flags;
3444
Mike Snitzer583024d2014-10-28 20:58:45 -04003445 /*
3446 * Must requeue active_thins' bios and then resume
3447	 * active_thins _before_ clearing the pool's 'suspended' flag.
3448 */
3449 requeue_bios(pool);
3450 pool_resume_active_thins(pool);
3451
Joe Thornber991d9fa2011-10-31 20:21:18 +00003452 spin_lock_irqsave(&pool->lock, flags);
Joe Thornber88a66212013-12-04 20:16:12 -05003453 pool->low_water_triggered = false;
Mike Snitzer80e96c52014-11-07 15:09:46 -05003454 pool->suspended = false;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003455 spin_unlock_irqrestore(&pool->lock, flags);
Mike Snitzer80e96c52014-11-07 15:09:46 -05003456
Joe Thornber905e51b2012-03-28 18:41:27 +01003457 do_waker(&pool->waker.work);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003458}
3459
Mike Snitzer80e96c52014-11-07 15:09:46 -05003460static void pool_presuspend(struct dm_target *ti)
3461{
3462 struct pool_c *pt = ti->private;
3463 struct pool *pool = pt->pool;
3464 unsigned long flags;
3465
3466 spin_lock_irqsave(&pool->lock, flags);
3467 pool->suspended = true;
3468 spin_unlock_irqrestore(&pool->lock, flags);
Mike Snitzer583024d2014-10-28 20:58:45 -04003469
3470 pool_suspend_active_thins(pool);
Mike Snitzer80e96c52014-11-07 15:09:46 -05003471}
3472
3473static void pool_presuspend_undo(struct dm_target *ti)
3474{
3475 struct pool_c *pt = ti->private;
3476 struct pool *pool = pt->pool;
3477 unsigned long flags;
3478
Mike Snitzer583024d2014-10-28 20:58:45 -04003479 pool_resume_active_thins(pool);
3480
Mike Snitzer80e96c52014-11-07 15:09:46 -05003481 spin_lock_irqsave(&pool->lock, flags);
3482 pool->suspended = false;
3483 spin_unlock_irqrestore(&pool->lock, flags);
3484}
3485
Joe Thornber991d9fa2011-10-31 20:21:18 +00003486static void pool_postsuspend(struct dm_target *ti)
3487{
Joe Thornber991d9fa2011-10-31 20:21:18 +00003488 struct pool_c *pt = ti->private;
3489 struct pool *pool = pt->pool;
3490
Joe Thornber905e51b2012-03-28 18:41:27 +01003491 cancel_delayed_work(&pool->waker);
Joe Thornber85ad643b2014-05-09 15:59:38 +01003492 cancel_delayed_work(&pool->no_space_timeout);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003493 flush_workqueue(pool->wq);
Joe Thornber020cc3b2013-12-04 15:05:36 -05003494 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003495}
3496
3497static int check_arg_count(unsigned argc, unsigned args_required)
3498{
3499 if (argc != args_required) {
3500 DMWARN("Message received with %u arguments instead of %u.",
3501 argc, args_required);
3502 return -EINVAL;
3503 }
3504
3505 return 0;
3506}
3507
3508static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3509{
3510 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3511 *dev_id <= MAX_DEV_ID)
3512 return 0;
3513
3514 if (warning)
3515 DMWARN("Message received with invalid device id: %s", arg);
3516
3517 return -EINVAL;
3518}
3519
3520static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3521{
3522 dm_thin_id dev_id;
3523 int r;
3524
3525 r = check_arg_count(argc, 2);
3526 if (r)
3527 return r;
3528
3529 r = read_dev_id(argv[1], &dev_id, 1);
3530 if (r)
3531 return r;
3532
3533 r = dm_pool_create_thin(pool->pmd, dev_id);
3534 if (r) {
3535 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3536 argv[1]);
3537 return r;
3538 }
3539
3540 return 0;
3541}
3542
3543static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3544{
3545 dm_thin_id dev_id;
3546 dm_thin_id origin_dev_id;
3547 int r;
3548
3549 r = check_arg_count(argc, 3);
3550 if (r)
3551 return r;
3552
3553 r = read_dev_id(argv[1], &dev_id, 1);
3554 if (r)
3555 return r;
3556
3557 r = read_dev_id(argv[2], &origin_dev_id, 1);
3558 if (r)
3559 return r;
3560
3561 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3562 if (r) {
3563 DMWARN("Creation of new snapshot %s of device %s failed.",
3564 argv[1], argv[2]);
3565 return r;
3566 }
3567
3568 return 0;
3569}
3570
3571static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3572{
3573 dm_thin_id dev_id;
3574 int r;
3575
3576 r = check_arg_count(argc, 2);
3577 if (r)
3578 return r;
3579
3580 r = read_dev_id(argv[1], &dev_id, 1);
3581 if (r)
3582 return r;
3583
3584 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3585 if (r)
3586 DMWARN("Deletion of thin device %s failed.", argv[1]);
3587
3588 return r;
3589}
3590
3591static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3592{
3593 dm_thin_id old_id, new_id;
3594 int r;
3595
3596 r = check_arg_count(argc, 3);
3597 if (r)
3598 return r;
3599
3600 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3601 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3602 return -EINVAL;
3603 }
3604
3605 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3606 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3607 return -EINVAL;
3608 }
3609
3610 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3611 if (r) {
3612 DMWARN("Failed to change transaction id from %s to %s.",
3613 argv[1], argv[2]);
3614 return r;
3615 }
3616
3617 return 0;
3618}
3619
Joe Thornbercc8394d2012-06-03 00:30:01 +01003620static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3621{
3622 int r;
3623
3624 r = check_arg_count(argc, 1);
3625 if (r)
3626 return r;
3627
Joe Thornber020cc3b2013-12-04 15:05:36 -05003628 (void) commit(pool);
Joe Thornber0d200ae2012-07-03 12:55:31 +01003629
Joe Thornbercc8394d2012-06-03 00:30:01 +01003630 r = dm_pool_reserve_metadata_snap(pool->pmd);
3631 if (r)
3632 DMWARN("reserve_metadata_snap message failed.");
3633
3634 return r;
3635}
3636
3637static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3638{
3639 int r;
3640
3641 r = check_arg_count(argc, 1);
3642 if (r)
3643 return r;
3644
3645 r = dm_pool_release_metadata_snap(pool->pmd);
3646 if (r)
3647 DMWARN("release_metadata_snap message failed.");
3648
3649 return r;
3650}
3651
Joe Thornber991d9fa2011-10-31 20:21:18 +00003652/*
3653 * Messages supported:
3654 * create_thin <dev_id>
3655 * create_snap <dev_id> <origin_id>
3656 * delete <dev_id>
Joe Thornber991d9fa2011-10-31 20:21:18 +00003657 * set_transaction_id <current_trans_id> <new_trans_id>
Joe Thornbercc8394d2012-06-03 00:30:01 +01003658 * reserve_metadata_snap
3659 * release_metadata_snap
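 *
 * e.g. (hypothetical pool device name):
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0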
Joe Thornber991d9fa2011-10-31 20:21:18 +00003660 */
3661static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3662{
3663 int r = -EINVAL;
3664 struct pool_c *pt = ti->private;
3665 struct pool *pool = pt->pool;
3666
Joe Thornber2a7eaea2015-01-26 11:38:21 +00003667 if (get_pool_mode(pool) >= PM_READ_ONLY) {
3668 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3669 dm_device_name(pool->pool_md));
Mike Snitzerfd467692015-06-09 12:31:26 -04003670 return -EOPNOTSUPP;
Joe Thornber2a7eaea2015-01-26 11:38:21 +00003671 }
3672
Joe Thornber991d9fa2011-10-31 20:21:18 +00003673 if (!strcasecmp(argv[0], "create_thin"))
3674 r = process_create_thin_mesg(argc, argv, pool);
3675
3676 else if (!strcasecmp(argv[0], "create_snap"))
3677 r = process_create_snap_mesg(argc, argv, pool);
3678
3679 else if (!strcasecmp(argv[0], "delete"))
3680 r = process_delete_mesg(argc, argv, pool);
3681
3682 else if (!strcasecmp(argv[0], "set_transaction_id"))
3683 r = process_set_transaction_id_mesg(argc, argv, pool);
3684
Joe Thornbercc8394d2012-06-03 00:30:01 +01003685 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3686 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3687
3688 else if (!strcasecmp(argv[0], "release_metadata_snap"))
3689 r = process_release_metadata_snap_mesg(argc, argv, pool);
3690
Joe Thornber991d9fa2011-10-31 20:21:18 +00003691 else
3692 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3693
Joe Thornbere49e5822012-07-27 15:08:16 +01003694 if (!r)
Joe Thornber020cc3b2013-12-04 15:05:36 -05003695 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003696
3697 return r;
3698}
3699
Joe Thornbere49e5822012-07-27 15:08:16 +01003700static void emit_flags(struct pool_features *pf, char *result,
3701 unsigned sz, unsigned maxlen)
3702{
3703 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
Mike Snitzer787a996c2013-12-06 16:21:43 -05003704 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3705 pf->error_if_no_space;
Joe Thornbere49e5822012-07-27 15:08:16 +01003706 DMEMIT("%u ", count);
3707
3708 if (!pf->zero_new_blocks)
3709 DMEMIT("skip_block_zeroing ");
3710
3711 if (!pf->discard_enabled)
3712 DMEMIT("ignore_discard ");
3713
3714 if (!pf->discard_passdown)
3715 DMEMIT("no_discard_passdown ");
3716
3717 if (pf->mode == PM_READ_ONLY)
3718 DMEMIT("read_only ");
Mike Snitzer787a996c2013-12-06 16:21:43 -05003719
3720 if (pf->error_if_no_space)
3721 DMEMIT("error_if_no_space ");
Joe Thornbere49e5822012-07-27 15:08:16 +01003722}
3723
Joe Thornber991d9fa2011-10-31 20:21:18 +00003724/*
3725 * Status line is:
3726 * <transaction id> <used metadata blocks>/<total metadata blocks>
3727 * <used data blocks>/<total data blocks> <held metadata root>
 * <pool mode> <discard config> <no space config>
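 *
 * e.g. (illustrative numbers only):
 *   1 406/4161600 20480/524288 - rw discard_passdown queue_if_no_space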
3728 */
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003729static void pool_status(struct dm_target *ti, status_type_t type,
3730 unsigned status_flags, char *result, unsigned maxlen)
Joe Thornber991d9fa2011-10-31 20:21:18 +00003731{
Joe Thornbere49e5822012-07-27 15:08:16 +01003732 int r;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003733 unsigned sz = 0;
3734 uint64_t transaction_id;
3735 dm_block_t nr_free_blocks_data;
3736 dm_block_t nr_free_blocks_metadata;
3737 dm_block_t nr_blocks_data;
3738 dm_block_t nr_blocks_metadata;
3739 dm_block_t held_root;
3740 char buf[BDEVNAME_SIZE];
3741 char buf2[BDEVNAME_SIZE];
3742 struct pool_c *pt = ti->private;
3743 struct pool *pool = pt->pool;
3744
3745 switch (type) {
3746 case STATUSTYPE_INFO:
Joe Thornbere49e5822012-07-27 15:08:16 +01003747 if (get_pool_mode(pool) == PM_FAIL) {
3748 DMEMIT("Fail");
3749 break;
3750 }
3751
Alasdair G Kergon1f4e0ff2012-07-27 15:08:16 +01003752 /* Commit to ensure statistics aren't out-of-date */
3753 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
Joe Thornber020cc3b2013-12-04 15:05:36 -05003754 (void) commit(pool);
Alasdair G Kergon1f4e0ff2012-07-27 15:08:16 +01003755
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003756 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3757 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003758 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3759 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003760 goto err;
3761 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003762
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003763 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3764 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003765 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3766 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003767 goto err;
3768 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003769
3770 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003771 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003772 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3773 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003774 goto err;
3775 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003776
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003777 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3778 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003779 DMERR("%s: dm_pool_get_free_block_count returned %d",
3780 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003781 goto err;
3782 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003783
3784 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003785 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003786 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3787 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003788 goto err;
3789 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003790
Joe Thornbercc8394d2012-06-03 00:30:01 +01003791 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003792 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04003793 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3794 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003795 goto err;
3796 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003797
3798 DMEMIT("%llu %llu/%llu %llu/%llu ",
3799 (unsigned long long)transaction_id,
3800 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3801 (unsigned long long)nr_blocks_metadata,
3802 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3803 (unsigned long long)nr_blocks_data);
3804
3805 if (held_root)
Joe Thornbere49e5822012-07-27 15:08:16 +01003806 DMEMIT("%llu ", held_root);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003807 else
Joe Thornbere49e5822012-07-27 15:08:16 +01003808 DMEMIT("- ");
3809
Joe Thornber3e1a0692014-03-03 16:03:26 +00003810 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3811 DMEMIT("out_of_data_space ");
3812 else if (pool->pf.mode == PM_READ_ONLY)
Joe Thornbere49e5822012-07-27 15:08:16 +01003813 DMEMIT("ro ");
3814 else
3815 DMEMIT("rw ");
3816
Mike Snitzer018debe2012-12-21 20:23:32 +00003817 if (!pool->pf.discard_enabled)
Mike Snitzer787a996c2013-12-06 16:21:43 -05003818 DMEMIT("ignore_discard ");
Mike Snitzer018debe2012-12-21 20:23:32 +00003819 else if (pool->pf.discard_passdown)
Mike Snitzer787a996c2013-12-06 16:21:43 -05003820 DMEMIT("discard_passdown ");
Joe Thornbere49e5822012-07-27 15:08:16 +01003821 else
Mike Snitzer787a996c2013-12-06 16:21:43 -05003822 DMEMIT("no_discard_passdown ");
3823
3824 if (pool->pf.error_if_no_space)
3825 DMEMIT("error_if_no_space ");
3826 else
3827 DMEMIT("queue_if_no_space ");
Joe Thornber991d9fa2011-10-31 20:21:18 +00003828
3829 break;
3830
3831 case STATUSTYPE_TABLE:
3832 DMEMIT("%s %s %lu %llu ",
3833 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3834 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3835 (unsigned long)pool->sectors_per_block,
3836 (unsigned long long)pt->low_water_blocks);
Mike Snitzer0424caa2012-09-26 23:45:47 +01003837 emit_flags(&pt->requested_pf, result, sz, maxlen);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003838 break;
3839 }
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003840 return;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003841
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003842err:
3843 DMEMIT("Error");
Joe Thornber991d9fa2011-10-31 20:21:18 +00003844}
3845
3846static int pool_iterate_devices(struct dm_target *ti,
3847 iterate_devices_callout_fn fn, void *data)
3848{
3849 struct pool_c *pt = ti->private;
3850
3851 return fn(ti, pt->data_dev, 0, ti->len, data);
3852}
3853
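/*
 * Defer bvec merge decisions to the pool's data device so bios built against
 * the pool respect the underlying queue's merge constraints.
 */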
3854static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3855 struct bio_vec *biovec, int max_size)
3856{
3857 struct pool_c *pt = ti->private;
3858 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3859
3860 if (!q->merge_bvec_fn)
3861 return max_size;
3862
3863 bvm->bi_bdev = pt->data_dev->bdev;
3864
3865 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3866}
3867
3868static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3869{
3870 struct pool_c *pt = ti->private;
3871 struct pool *pool = pt->pool;
Mike Snitzer604ea902014-10-09 18:43:25 -04003872 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3873
3874 /*
Mike Snitzerd200c302014-11-20 18:07:43 -05003875	 * If max_sectors is smaller than pool->sectors_per_block, adjust it
3876	 * down to the highest power-of-2 factor of pool->sectors_per_block.
3877	 * This is especially beneficial when the pool's data device is a RAID
3878	 * device whose full stripe width matches pool->sectors_per_block --
3879	 * even though partial RAID-stripe-sized IOs will be issued to a single
3880	 * RAID stripe, when aggregated they will end on a full RAID stripe
3881	 * boundary, which avoids cascading additional partial RAID stripe writes.
Mike Snitzer604ea902014-10-09 18:43:25 -04003882 */
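	/*
	 * e.g. with sectors_per_block = 768 (384KiB) and a stacked max_sectors
	 * of 512: 512 does not divide 768, so it is decremented and rounded
	 * down to the next power of two, 256, which does divide 768 -- the
	 * loop below settles on max_sectors = 256.
	 */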
Mike Snitzer604ea902014-10-09 18:43:25 -04003883 if (limits->max_sectors < pool->sectors_per_block) {
3884 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3885 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3886 limits->max_sectors--;
3887 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3888 }
Mike Snitzer604ea902014-10-09 18:43:25 -04003889 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003890
Mike Snitzer0cc67cd2013-08-20 15:02:41 -04003891 /*
3892 * If the system-determined stacked limits are compatible with the
3893 * pool's blocksize (io_opt is a factor) do not override them.
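	 *
	 * e.g. if the stacked io_opt were 64KiB but the pool block size 512KiB,
	 * io_opt is overridden to 512KiB below; io_min becomes max_sectors (in
	 * bytes) when that divides the block size, otherwise the block size.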
3894 */
3895 if (io_opt_sectors < pool->sectors_per_block ||
Mike Snitzer604ea902014-10-09 18:43:25 -04003896 !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3897 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3898 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3899 else
3900 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
Mike Snitzer0cc67cd2013-08-20 15:02:41 -04003901 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3902 }
Mike Snitzer0424caa2012-09-26 23:45:47 +01003903
3904 /*
3905 * pt->adjusted_pf is a staging area for the actual features to use.
3906 * They get transferred to the live pool in bind_control_target()
3907 * called from pool_preresume().
3908 */
Mike Snitzerb60ab992013-09-19 18:49:11 -04003909 if (!pt->adjusted_pf.discard_enabled) {
3910 /*
3911 * Must explicitly disallow stacking discard limits otherwise the
3912 * block layer will stack them if pool's data device has support.
3913 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3914 * user to see that, so make sure to set all discard limits to 0.
3915 */
3916 limits->discard_granularity = 0;
Mike Snitzer0424caa2012-09-26 23:45:47 +01003917 return;
Mike Snitzerb60ab992013-09-19 18:49:11 -04003918 }
Mike Snitzer0424caa2012-09-26 23:45:47 +01003919
3920 disable_passdown_if_not_supported(pt);
3921
Joe Thornber34fbcf62015-04-16 12:58:35 +01003922 /*
3923 * The pool uses the same discard limits as the underlying data
3924 * device. DM core has already set this up.
3925 */
Joe Thornber991d9fa2011-10-31 20:21:18 +00003926}
3927
3928static struct target_type pool_target = {
3929 .name = "thin-pool",
3930 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3931 DM_TARGET_IMMUTABLE,
Joe Thornber34fbcf62015-04-16 12:58:35 +01003932 .version = {1, 15, 0},
Joe Thornber991d9fa2011-10-31 20:21:18 +00003933 .module = THIS_MODULE,
3934 .ctr = pool_ctr,
3935 .dtr = pool_dtr,
3936 .map = pool_map,
Mike Snitzer80e96c52014-11-07 15:09:46 -05003937 .presuspend = pool_presuspend,
3938 .presuspend_undo = pool_presuspend_undo,
Joe Thornber991d9fa2011-10-31 20:21:18 +00003939 .postsuspend = pool_postsuspend,
3940 .preresume = pool_preresume,
3941 .resume = pool_resume,
3942 .message = pool_message,
3943 .status = pool_status,
3944 .merge = pool_merge,
3945 .iterate_devices = pool_iterate_devices,
3946 .io_hints = pool_io_hints,
3947};
3948
3949/*----------------------------------------------------------------
3950 * Thin target methods
3951 *--------------------------------------------------------------*/
Joe Thornberb10ebd32014-04-08 11:29:01 +01003952static void thin_get(struct thin_c *tc)
3953{
3954 atomic_inc(&tc->refcount);
3955}
3956
3957static void thin_put(struct thin_c *tc)
3958{
3959 if (atomic_dec_and_test(&tc->refcount))
3960 complete(&tc->can_destroy);
3961}
3962
Joe Thornber991d9fa2011-10-31 20:21:18 +00003963static void thin_dtr(struct dm_target *ti)
3964{
3965 struct thin_c *tc = ti->private;
Mike Snitzerc140e1c2014-03-20 21:17:14 -04003966 unsigned long flags;
3967
3968 spin_lock_irqsave(&tc->pool->lock, flags);
3969 list_del_rcu(&tc->list);
3970 spin_unlock_irqrestore(&tc->pool->lock, flags);
3971 synchronize_rcu();
Joe Thornber991d9fa2011-10-31 20:21:18 +00003972
Mikulas Patocka17181fb2014-11-05 17:00:13 -05003973 thin_put(tc);
3974 wait_for_completion(&tc->can_destroy);
3975
Joe Thornber991d9fa2011-10-31 20:21:18 +00003976 mutex_lock(&dm_thin_pool_table.mutex);
3977
3978 __pool_dec(tc->pool);
3979 dm_pool_close_thin_device(tc->td);
3980 dm_put_device(ti, tc->pool_dev);
Joe Thornber2dd9c252012-03-28 18:41:28 +01003981 if (tc->origin_dev)
3982 dm_put_device(ti, tc->origin_dev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003983 kfree(tc);
3984
3985 mutex_unlock(&dm_thin_pool_table.mutex);
3986}
3987
3988/*
3989 * Thin target parameters:
3990 *
Joe Thornber2dd9c252012-03-28 18:41:28 +01003991 * <pool_dev> <dev_id> [origin_dev]
Joe Thornber991d9fa2011-10-31 20:21:18 +00003992 *
3993 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3994 * dev_id: the internal device identifier
Joe Thornber2dd9c252012-03-28 18:41:28 +01003995 * origin_dev: a device external to the pool that should act as the origin
Joe Thornber67e2e2b2012-03-28 18:41:29 +01003996 *
3997 * If the pool device has discards disabled, they get disabled for the thin
3998 * device as well.
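 *
 * Example (hypothetical pool and device id):
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 *   maps a 1GiB (2097152-sector) thin device using internal device id 0,
 *   created beforehand with the pool's "create_thin 0" message.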
Joe Thornber991d9fa2011-10-31 20:21:18 +00003999 */
4000static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
4001{
4002 int r;
4003 struct thin_c *tc;
Joe Thornber2dd9c252012-03-28 18:41:28 +01004004 struct dm_dev *pool_dev, *origin_dev;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004005 struct mapped_device *pool_md;
Joe Thornber5e3283e2014-04-08 11:08:41 +01004006 unsigned long flags;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004007
4008 mutex_lock(&dm_thin_pool_table.mutex);
4009
Joe Thornber2dd9c252012-03-28 18:41:28 +01004010 if (argc != 2 && argc != 3) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00004011 ti->error = "Invalid argument count";
4012 r = -EINVAL;
4013 goto out_unlock;
4014 }
4015
4016 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
4017 if (!tc) {
4018 ti->error = "Out of memory";
4019 r = -ENOMEM;
4020 goto out_unlock;
4021 }
Mike Snitzer583024d2014-10-28 20:58:45 -04004022 tc->thin_md = dm_table_get_md(ti->table);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04004023 spin_lock_init(&tc->lock);
Joe Thornbera374bb22014-10-10 13:43:14 +01004024 INIT_LIST_HEAD(&tc->deferred_cells);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04004025 bio_list_init(&tc->deferred_bio_list);
4026 bio_list_init(&tc->retry_on_resume_list);
Mike Snitzer67324ea2014-03-21 18:33:41 -04004027 tc->sort_bio_list = RB_ROOT;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004028
Joe Thornber2dd9c252012-03-28 18:41:28 +01004029 if (argc == 3) {
4030 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
4031 if (r) {
4032 ti->error = "Error opening origin device";
4033 goto bad_origin_dev;
4034 }
4035 tc->origin_dev = origin_dev;
4036 }
4037
Joe Thornber991d9fa2011-10-31 20:21:18 +00004038 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
4039 if (r) {
4040 ti->error = "Error opening pool device";
4041 goto bad_pool_dev;
4042 }
4043 tc->pool_dev = pool_dev;
4044
4045 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4046 ti->error = "Invalid device id";
4047 r = -EINVAL;
4048 goto bad_common;
4049 }
4050
4051 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4052 if (!pool_md) {
4053 ti->error = "Couldn't get pool mapped device";
4054 r = -EINVAL;
4055 goto bad_common;
4056 }
4057
4058 tc->pool = __pool_table_lookup(pool_md);
4059 if (!tc->pool) {
4060 ti->error = "Couldn't find pool object";
4061 r = -EINVAL;
4062 goto bad_pool_lookup;
4063 }
4064 __pool_inc(tc->pool);
4065
Joe Thornbere49e5822012-07-27 15:08:16 +01004066 if (get_pool_mode(tc->pool) == PM_FAIL) {
4067 ti->error = "Couldn't open thin device, Pool is in fail mode";
Mike Snitzer1acacc02014-02-19 20:32:33 -05004068 r = -EINVAL;
Mike Snitzer80e96c52014-11-07 15:09:46 -05004069 goto bad_pool;
Joe Thornbere49e5822012-07-27 15:08:16 +01004070 }
4071
Joe Thornber991d9fa2011-10-31 20:21:18 +00004072 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4073 if (r) {
4074 ti->error = "Couldn't open thin internal device";
Mike Snitzer80e96c52014-11-07 15:09:46 -05004075 goto bad_pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004076 }
4077
Mike Snitzer542f9032012-07-27 15:08:00 +01004078 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4079 if (r)
Mike Snitzer80e96c52014-11-07 15:09:46 -05004080 goto bad;
Mike Snitzer542f9032012-07-27 15:08:00 +01004081
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00004082 ti->num_flush_bios = 1;
Joe Thornber16ad3d12012-07-27 15:08:07 +01004083 ti->flush_supported = true;
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00004084 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
Joe Thornber67e2e2b2012-03-28 18:41:29 +01004085
4086 /* In case the pool supports discards, pass them on. */
Mike Snitzerb60ab992013-09-19 18:49:11 -04004087 ti->discard_zeroes_data_unsupported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01004088 if (tc->pool->pf.discard_enabled) {
Alasdair G Kergon0ac55482012-07-27 15:08:08 +01004089 ti->discards_supported = true;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00004090 ti->num_discard_bios = 1;
Joe Thornber34fbcf62015-04-16 12:58:35 +01004091 ti->split_discard_bios = false;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01004092 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00004093
Joe Thornber991d9fa2011-10-31 20:21:18 +00004094 mutex_unlock(&dm_thin_pool_table.mutex);
4095
Joe Thornber5e3283e2014-04-08 11:08:41 +01004096 spin_lock_irqsave(&tc->pool->lock, flags);
Mike Snitzer80e96c52014-11-07 15:09:46 -05004097 if (tc->pool->suspended) {
4098 spin_unlock_irqrestore(&tc->pool->lock, flags);
4099 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
4100 ti->error = "Unable to activate thin device while pool is suspended";
4101 r = -EINVAL;
4102 goto bad;
4103 }
Marc Dionne2b94e892014-12-17 07:59:59 -05004104 atomic_set(&tc->refcount, 1);
4105 init_completion(&tc->can_destroy);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04004106 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
Joe Thornber5e3283e2014-04-08 11:08:41 +01004107 spin_unlock_irqrestore(&tc->pool->lock, flags);
Mike Snitzerc140e1c2014-03-20 21:17:14 -04004108 /*
4109 * This synchronize_rcu() call is needed here otherwise we risk a
4110 * wake_worker() call finding no bios to process (because the newly
4111 * added tc isn't yet visible). So this reduces latency since we
4112 * aren't then dependent on the periodic commit to wake_worker().
4113 */
4114 synchronize_rcu();
4115
Mike Snitzer80e96c52014-11-07 15:09:46 -05004116 dm_put(pool_md);
4117
Joe Thornber991d9fa2011-10-31 20:21:18 +00004118 return 0;
4119
Mike Snitzer80e96c52014-11-07 15:09:46 -05004120bad:
Mike Snitzer1acacc02014-02-19 20:32:33 -05004121 dm_pool_close_thin_device(tc->td);
Mike Snitzer80e96c52014-11-07 15:09:46 -05004122bad_pool:
Joe Thornber991d9fa2011-10-31 20:21:18 +00004123 __pool_dec(tc->pool);
4124bad_pool_lookup:
4125 dm_put(pool_md);
4126bad_common:
4127 dm_put_device(ti, tc->pool_dev);
4128bad_pool_dev:
Joe Thornber2dd9c252012-03-28 18:41:28 +01004129 if (tc->origin_dev)
4130 dm_put_device(ti, tc->origin_dev);
4131bad_origin_dev:
Joe Thornber991d9fa2011-10-31 20:21:18 +00004132 kfree(tc);
4133out_unlock:
4134 mutex_unlock(&dm_thin_pool_table.mutex);
4135
4136 return r;
4137}
4138
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00004139static int thin_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00004140{
Kent Overstreet4f024f32013-10-11 15:44:27 -07004141 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004142
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00004143 return thin_bio_map(ti, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004144}
4145
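/*
 * Called on bio completion: drop the bio's deferred-set entries (completing
 * mappings that were quiesced against shared reads and queueing discard
 * mappings that were quiesced against all outstanding io), then release the
 * bio's cell if one was taken when it was mapped.
 */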
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00004146static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
Joe Thornbereb2aa482012-03-28 18:41:28 +01004147{
4148 unsigned long flags;
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00004149 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornbereb2aa482012-03-28 18:41:28 +01004150 struct list_head work;
Mike Snitzera24c2562012-06-03 00:30:00 +01004151 struct dm_thin_new_mapping *m, *tmp;
Joe Thornbereb2aa482012-03-28 18:41:28 +01004152 struct pool *pool = h->tc->pool;
4153
4154 if (h->shared_read_entry) {
4155 INIT_LIST_HEAD(&work);
Mike Snitzer44feb382012-10-12 21:02:10 +01004156 dm_deferred_entry_dec(h->shared_read_entry, &work);
Joe Thornbereb2aa482012-03-28 18:41:28 +01004157
4158 spin_lock_irqsave(&pool->lock, flags);
4159 list_for_each_entry_safe(m, tmp, &work, list) {
4160 list_del(&m->list);
Joe Thornber50f3c3e2014-06-13 13:57:09 +01004161 __complete_mapping_preparation(m);
Joe Thornbereb2aa482012-03-28 18:41:28 +01004162 }
4163 spin_unlock_irqrestore(&pool->lock, flags);
4164 }
4165
Joe Thornber104655f2012-03-28 18:41:28 +01004166 if (h->all_io_entry) {
4167 INIT_LIST_HEAD(&work);
Mike Snitzer44feb382012-10-12 21:02:10 +01004168 dm_deferred_entry_dec(h->all_io_entry, &work);
Joe Thornber563af182012-12-21 20:23:31 +00004169 if (!list_empty(&work)) {
4170 spin_lock_irqsave(&pool->lock, flags);
4171 list_for_each_entry_safe(m, tmp, &work, list)
Mike Snitzerdaec3382013-12-11 14:01:20 -05004172 list_add_tail(&m->list, &pool->prepared_discards);
Joe Thornber563af182012-12-21 20:23:31 +00004173 spin_unlock_irqrestore(&pool->lock, flags);
4174 wake_worker(pool);
4175 }
Joe Thornber104655f2012-03-28 18:41:28 +01004176 }
4177
Joe Thornber34fbcf62015-04-16 12:58:35 +01004178 if (h->cell)
4179 cell_defer_no_holder(h->tc, h->cell);
4180
Joe Thornbereb2aa482012-03-28 18:41:28 +01004181 return 0;
4182}
4183
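/*
 * Note for readers: the hook examined above lives in the bio's per-bio
 * data (sized for struct dm_thin_endio_hook in thin_ctr()) and is
 * filled in at map time by thin_hook_bio().  The deferred-set entries
 * dropped here are what hold back prepared mappings and discards until
 * every in-flight bio that might still be reading the old data has
 * completed.
 */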
Joe Thornber738211f2014-03-03 15:52:28 +00004184static void thin_presuspend(struct dm_target *ti)
4185{
4186 struct thin_c *tc = ti->private;
4187
4188 if (dm_noflush_suspending(ti))
4189 noflush_work(tc, do_noflush_start);
4190}
4191
Joe Thornber991d9fa2011-10-31 20:21:18 +00004192static void thin_postsuspend(struct dm_target *ti)
4193{
Joe Thornber738211f2014-03-03 15:52:28 +00004194 struct thin_c *tc = ti->private;
4195
4196 /*
4197 * The dm_noflush_suspending flag has been cleared by now, so
4198 * unfortunately we must always run this.
4199 */
4200 noflush_work(tc, do_noflush_stop);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004201}
4202
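/*
 * Illustrative userspace sequence (device name made up): a noflush
 * suspend, e.g. to reload a table without flushing queued io, looks
 * like
 *
 *	dmsetup suspend --noflush --nolockfs thin-vol
 *	dmsetup reload thin-vol --table "..."
 *	dmsetup resume thin-vol
 *
 * thin_presuspend() starts requeueing bios via do_noflush_start();
 * thin_postsuspend() must always call do_noflush_stop() because the
 * noflush flag has already been cleared by the time it runs.
 */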
Joe Thornbere5aea7b2014-06-13 14:47:24 +01004203static int thin_preresume(struct dm_target *ti)
4204{
4205 struct thin_c *tc = ti->private;
4206
4207 if (tc->origin_dev)
4208 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4209
4210 return 0;
4211}
4212
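/*
 * origin_size is sampled once per resume, in sectors, rather than on
 * every bio.  The provisioning path compares block boundaries against
 * it to decide whether a newly allocated block should be copied from
 * the external origin or simply zeroed (see schedule_external_copy()).
 */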
Joe Thornber991d9fa2011-10-31 20:21:18 +00004213/*
4214 * <nr mapped sectors> <highest mapped sector>
4215 */
Mikulas Patockafd7c0922013-03-01 22:45:44 +00004216static void thin_status(struct dm_target *ti, status_type_t type,
4217 unsigned status_flags, char *result, unsigned maxlen)
Joe Thornber991d9fa2011-10-31 20:21:18 +00004218{
4219 int r;
4220 ssize_t sz = 0;
4221 dm_block_t mapped, highest;
4222 char buf[BDEVNAME_SIZE];
4223 struct thin_c *tc = ti->private;
4224
Joe Thornbere49e5822012-07-27 15:08:16 +01004225 if (get_pool_mode(tc->pool) == PM_FAIL) {
4226 DMEMIT("Fail");
Mikulas Patockafd7c0922013-03-01 22:45:44 +00004227 return;
Joe Thornbere49e5822012-07-27 15:08:16 +01004228 }
4229
Joe Thornber991d9fa2011-10-31 20:21:18 +00004230 if (!tc->td)
4231 DMEMIT("-");
4232 else {
4233 switch (type) {
4234 case STATUSTYPE_INFO:
4235 r = dm_thin_get_mapped_count(tc->td, &mapped);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00004236 if (r) {
4237 DMERR("dm_thin_get_mapped_count returned %d", r);
4238 goto err;
4239 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00004240
4241 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00004242 if (r < 0) {
4243 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
4244 goto err;
4245 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00004246
4247 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4248 if (r)
4249 DMEMIT("%llu", ((highest + 1) *
4250 tc->pool->sectors_per_block) - 1);
4251 else
4252 DMEMIT("-");
4253 break;
4254
4255 case STATUSTYPE_TABLE:
4256 DMEMIT("%s %lu",
4257 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4258 (unsigned long) tc->dev_id);
Joe Thornber2dd9c252012-03-28 18:41:28 +01004259 if (tc->origin_dev)
4260 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
Joe Thornber991d9fa2011-10-31 20:21:18 +00004261 break;
4262 }
4263 }
4264
Mikulas Patockafd7c0922013-03-01 22:45:44 +00004265 return;
4266
4267err:
4268 DMEMIT("Error");
Joe Thornber991d9fa2011-10-31 20:21:18 +00004269}
4270
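/*
 * Example output (illustrative numbers): for a thin device on a pool
 * with 1MiB blocks (sectors_per_block == 2048) that has a single block
 * mapped at the start of the device,
 *
 *	# dmsetup status thin-vol
 *	0 2097152 thin 2048 2047
 *
 * i.e. 2048 mapped sectors and a highest mapped sector of
 * (0 + 1) * 2048 - 1.  Before any block is mapped the highest-mapped
 * field reads "-", and "Fail" is reported once the pool has entered
 * failure mode.
 */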
Mike Snitzer36f12ae2014-10-09 15:24:12 -04004271static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
4272 struct bio_vec *biovec, int max_size)
4273{
4274 struct thin_c *tc = ti->private;
4275 struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
4276
4277 if (!q->merge_bvec_fn)
4278 return max_size;
4279
4280 bvm->bi_bdev = tc->pool_dev->bdev;
4281 bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
4282
4283 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
4284}
4285
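/*
 * As with thin_map(), the sector is rebased with dm_target_offset()
 * and the query is redirected at the underlying pool device, so bios
 * built against the thin device respect the merge constraints of
 * whatever stack sits beneath the pool.
 */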
Joe Thornber991d9fa2011-10-31 20:21:18 +00004286static int thin_iterate_devices(struct dm_target *ti,
4287 iterate_devices_callout_fn fn, void *data)
4288{
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01004289 sector_t blocks;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004290 struct thin_c *tc = ti->private;
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01004291 struct pool *pool = tc->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00004292
4293 /*
4294 * We can't call dm_pool_get_data_dev_size() since that blocks. So
4295 * we follow a more convoluted path through to the pool's target.
4296 */
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01004297 if (!pool->ti)
Joe Thornber991d9fa2011-10-31 20:21:18 +00004298 return 0; /* nothing is bound */
4299
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01004300 blocks = pool->ti->len;
4301 (void) sector_div(blocks, pool->sectors_per_block);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004302 if (blocks)
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01004303 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004304
4305 return 0;
4306}
4307
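/*
 * Worked example (made-up numbers): if the bound pool target is 1000
 * sectors long with 128-sector blocks, blocks ends up as 7 and the
 * callout covers 7 * 128 = 896 sectors of the pool device, i.e. the
 * range handed to the callout is rounded down to a whole number of
 * pool blocks.
 */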
Joe Thornber34fbcf62015-04-16 12:58:35 +01004308static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4309{
4310 struct thin_c *tc = ti->private;
4311 struct pool *pool = tc->pool;
4312
4313 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4314 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
4315}
4316
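/*
 * Worked example (illustrative): the limits are expressed in 512-byte
 * sectors, so with a 1MiB pool block size discard_granularity becomes
 * 2048 << 9 = 1MiB, and the cap of 2048 * 1024 * 16 sectors works out
 * to 33554432 * 512 bytes = 16GiB per discard bio.
 */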
Joe Thornber991d9fa2011-10-31 20:21:18 +00004317static struct target_type thin_target = {
4318 .name = "thin",
Joe Thornber34fbcf62015-04-16 12:58:35 +01004319 .version = {1, 15, 0},
Joe Thornber991d9fa2011-10-31 20:21:18 +00004320 .module = THIS_MODULE,
4321 .ctr = thin_ctr,
4322 .dtr = thin_dtr,
4323 .map = thin_map,
Joe Thornbereb2aa482012-03-28 18:41:28 +01004324 .end_io = thin_endio,
Joe Thornbere5aea7b2014-06-13 14:47:24 +01004325 .preresume = thin_preresume,
Joe Thornber738211f2014-03-03 15:52:28 +00004326 .presuspend = thin_presuspend,
Joe Thornber991d9fa2011-10-31 20:21:18 +00004327 .postsuspend = thin_postsuspend,
4328 .status = thin_status,
Mike Snitzer36f12ae2014-10-09 15:24:12 -04004329 .merge = thin_merge,
Joe Thornber991d9fa2011-10-31 20:21:18 +00004330 .iterate_devices = thin_iterate_devices,
Joe Thornber34fbcf62015-04-16 12:58:35 +01004331 .io_hints = thin_io_hints,
Joe Thornber991d9fa2011-10-31 20:21:18 +00004332};
4333
4334/*----------------------------------------------------------------*/
4335
4336static int __init dm_thin_init(void)
4337{
4338 int r;
4339
4340 pool_table_init();
4341
4342 r = dm_register_target(&thin_target);
4343 if (r)
4344 return r;
4345
4346 r = dm_register_target(&pool_target);
4347 if (r)
Mike Snitzera24c2562012-06-03 00:30:00 +01004348 goto bad_pool_target;
4349
4350 r = -ENOMEM;
4351
Mike Snitzera24c2562012-06-03 00:30:00 +01004352 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4353 if (!_new_mapping_cache)
4354 goto bad_new_mapping_cache;
4355
Mike Snitzera24c2562012-06-03 00:30:00 +01004356 return 0;
4357
Mike Snitzera24c2562012-06-03 00:30:00 +01004358bad_new_mapping_cache:
Mike Snitzera24c2562012-06-03 00:30:00 +01004359 dm_unregister_target(&pool_target);
4360bad_pool_target:
4361 dm_unregister_target(&thin_target);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004362
4363 return r;
4364}
4365
4366static void dm_thin_exit(void)
4367{
4368 dm_unregister_target(&thin_target);
4369 dm_unregister_target(&pool_target);
Mike Snitzera24c2562012-06-03 00:30:00 +01004370
Mike Snitzera24c2562012-06-03 00:30:00 +01004371 kmem_cache_destroy(_new_mapping_cache);
Joe Thornber991d9fa2011-10-31 20:21:18 +00004372}
4373
4374module_init(dm_thin_init);
4375module_exit(dm_thin_exit);
4376
Mike Snitzer80c57892014-05-20 13:38:33 -04004377module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4378MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
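/*
 * Illustrative usage (paths assume the usual dm-thin-pool module name):
 * the timeout can be set at load time or adjusted at runtime, e.g.
 *
 *	modprobe dm_thin_pool no_space_timeout=300
 *	echo 300 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * A value of 0 disables the timeout, leaving bios queued indefinitely
 * while the pool is out of data space.
 */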
4379
Alasdair G Kergon7cab8bf2012-05-12 01:43:19 +01004380MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
Joe Thornber991d9fa2011-10-31 20:21:18 +00004381MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4382MODULE_LICENSE("GPL");