/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug further io to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the whole block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block, as it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes.  They are ordered from least to most degraded
 * so the modes can be compared numerically.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

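/*
 * Map a bio's starting sector to its virtual block number within the thin
 * device: a shift when the block size is a power of two, a division otherwise.
 */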
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&tc->pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release_no_holder(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

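/*
 * Called from the worker once the copy/zero has completed and any
 * concurrent reads have quiesced: commit the new mapping into the btree
 * and release the bios held in the cell.
 */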
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

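/*
 * Set up a new mapping that copies @data_origin to @data_dest.  If the bio
 * overwrites the whole block, the copy is skipped and the bio is issued
 * directly to the new block instead.
 */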
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

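/*
 * Allocate a free data block for @tc.  If the free count reads zero we
 * commit outstanding metadata first (which may release blocks) and retry;
 * -ENOSPC is returned once the pool is genuinely out of data space.
 */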
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			out_of_data_space(pool);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	/*
	 * When pool is read-only, no cell locking is needed because
	 * nothing is changing.
	 */
	WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);

	if (pool->pf.error_if_no_space)
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		handle_unserviceable_bio(pool, bio);
}

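/*
 * Handle a discard bio: whole-block discards are queued as prepared
 * mappings so the block can be unmapped (and optionally passed down to the
 * data device); partial-block discards are passed down or completed
 * immediately.
 */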
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
			handle_unserviceable_bio(tc->pool, bio);
		else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}

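/*
 * Drain the deferred bio list, issuing or remapping each bio, then commit
 * the metadata before releasing any FLUSH/FUA bios that were waiting on
 * that commit.
 */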
static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_bios);
	bio_list_init(&pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
		struct thin_c *tc = h->tc;

		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&pool->lock, flags);
			bio_list_merge(&pool->deferred_bios, &bios);
			spin_unlock_irqrestore(&pool->lock, flags);

			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) &&
	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
		return;

	if (commit(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
	return pool->pf.mode;
}

Mike Snitzer8b64e882013-12-20 14:27:28 -05001402static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
Joe Thornbere49e5822012-07-27 15:08:16 +01001403{
1404 int r;
Mike Snitzercdc2b412014-02-14 18:10:55 -05001405 struct pool_c *pt = pool->ti->private;
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05001406 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1407 enum pool_mode old_mode = get_pool_mode(pool);
1408
1409 /*
1410 * Never allow the pool to transition to PM_WRITE mode if user
1411 * intervention is required to verify metadata and data consistency.
1412 */
1413 if (new_mode == PM_WRITE && needs_check) {
1414 DMERR("%s: unable to switch pool to write mode until repaired.",
1415 dm_device_name(pool->pool_md));
1416 if (old_mode != new_mode)
1417 new_mode = old_mode;
1418 else
1419 new_mode = PM_READ_ONLY;
1420 }
1421 /*
1422 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1423 * not going to recover without a thin_repair. So we never let the
1424 * pool move out of the old mode.
1425 */
1426 if (old_mode == PM_FAIL)
1427 new_mode = old_mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01001428
Mike Snitzer8b64e882013-12-20 14:27:28 -05001429 switch (new_mode) {
Joe Thornbere49e5822012-07-27 15:08:16 +01001430 case PM_FAIL:
Mike Snitzer8b64e882013-12-20 14:27:28 -05001431 if (old_mode != new_mode)
1432 DMERR("%s: switching pool to failure mode",
1433 dm_device_name(pool->pool_md));
Joe Thornber5383ef32013-12-04 16:30:01 -05001434 dm_pool_metadata_read_only(pool->pmd);
Joe Thornbere49e5822012-07-27 15:08:16 +01001435 pool->process_bio = process_bio_fail;
1436 pool->process_discard = process_bio_fail;
1437 pool->process_prepared_mapping = process_prepared_mapping_fail;
1438 pool->process_prepared_discard = process_prepared_discard_fail;
1439 break;
1440
1441 case PM_READ_ONLY:
Mike Snitzer8b64e882013-12-20 14:27:28 -05001442 if (old_mode != new_mode)
1443 DMERR("%s: switching pool to read-only mode",
1444 dm_device_name(pool->pool_md));
Joe Thornbere49e5822012-07-27 15:08:16 +01001445 r = dm_pool_abort_metadata(pool->pmd);
1446 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04001447 DMERR("%s: aborting transaction failed",
1448 dm_device_name(pool->pool_md));
Mike Snitzer8b64e882013-12-20 14:27:28 -05001449 new_mode = PM_FAIL;
1450 set_pool_mode(pool, new_mode);
Joe Thornbere49e5822012-07-27 15:08:16 +01001451 } else {
1452 dm_pool_metadata_read_only(pool->pmd);
1453 pool->process_bio = process_bio_read_only;
1454 pool->process_discard = process_discard;
1455 pool->process_prepared_mapping = process_prepared_mapping_fail;
1456 pool->process_prepared_discard = process_prepared_discard_passdown;
1457 }
1458 break;
1459
1460 case PM_WRITE:
Mike Snitzer8b64e882013-12-20 14:27:28 -05001461 if (old_mode != new_mode)
1462 DMINFO("%s: switching pool to write mode",
1463 dm_device_name(pool->pool_md));
Joe Thornber9b7aaa62013-12-04 16:58:19 -05001464 dm_pool_metadata_read_write(pool->pmd);
Joe Thornbere49e5822012-07-27 15:08:16 +01001465 pool->process_bio = process_bio;
1466 pool->process_discard = process_discard;
1467 pool->process_prepared_mapping = process_prepared_mapping;
1468 pool->process_prepared_discard = process_prepared_discard;
1469 break;
1470 }
Mike Snitzer8b64e882013-12-20 14:27:28 -05001471
1472 pool->pf.mode = new_mode;
Mike Snitzercdc2b412014-02-14 18:10:55 -05001473 /*
1474 * The pool mode may have changed, sync it so bind_control_target()
1475 * doesn't cause an unexpected mode transition on resume.
1476 */
1477 pt->adjusted_pf.mode = new_mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01001478}
1479
Joe Thornberb5330652013-12-04 19:51:33 -05001480/*
1481 * Rather than calling set_pool_mode() directly, use these helpers, which
1482 * report the reason for the mode degradation.
1483 */
Mike Snitzer399cadd2013-12-05 16:03:33 -05001484static void out_of_data_space(struct pool *pool)
1485{
1486 DMERR_LIMIT("%s: no free data space available.",
1487 dm_device_name(pool->pool_md));
Mike Snitzer399cadd2013-12-05 16:03:33 -05001488 set_pool_mode(pool, PM_READ_ONLY);
1489}
1490
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05001491static void abort_transaction(struct pool *pool)
1492{
1493 const char *dev_name = dm_device_name(pool->pool_md);
1494
1495 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1496 if (dm_pool_abort_metadata(pool->pmd)) {
1497 DMERR("%s: failed to abort metadata transaction", dev_name);
1498 set_pool_mode(pool, PM_FAIL);
1499 }
1500
1501 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1502 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1503 set_pool_mode(pool, PM_FAIL);
1504 }
1505}
1506
Joe Thornberb5330652013-12-04 19:51:33 -05001507static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1508{
1509 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1510 dm_device_name(pool->pool_md), op, r);
1511
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05001512 abort_transaction(pool);
Joe Thornberb5330652013-12-04 19:51:33 -05001513 set_pool_mode(pool, PM_READ_ONLY);
1514}
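/*
 * Usage sketch (illustrative only, not part of the driver): an io-path
 * metadata failure is expected to come through metadata_operation_failed()
 * rather than a direct set_pool_mode() call, so the failing operation is
 * logged and the transaction aborted before the pool is degraded:
 *
 *	r = dm_pool_commit_metadata(pool->pmd);
 *	if (r)
 *		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
 */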
1515
Joe Thornbere49e5822012-07-27 15:08:16 +01001516/*----------------------------------------------------------------*/
1517
Joe Thornber991d9fa2011-10-31 20:21:18 +00001518/*
1519 * Mapping functions.
1520 */
1521
1522/*
1523 * Called only while mapping a thin bio to hand it over to the workqueue.
1524 */
1525static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1526{
1527 unsigned long flags;
1528 struct pool *pool = tc->pool;
1529
1530 spin_lock_irqsave(&pool->lock, flags);
1531 bio_list_add(&pool->deferred_bios, bio);
1532 spin_unlock_irqrestore(&pool->lock, flags);
1533
1534 wake_worker(pool);
1535}
1536
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00001537static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
Joe Thornbereb2aa482012-03-28 18:41:28 +01001538{
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00001539 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornbereb2aa482012-03-28 18:41:28 +01001540
1541 h->tc = tc;
1542 h->shared_read_entry = NULL;
Joe Thornbere8088072012-12-21 20:23:31 +00001543 h->all_io_entry = NULL;
Joe Thornbereb2aa482012-03-28 18:41:28 +01001544 h->overwrite_mapping = NULL;
Joe Thornbereb2aa482012-03-28 18:41:28 +01001545}
1546
Joe Thornber991d9fa2011-10-31 20:21:18 +00001547/*
1548 * Non-blocking function called from the thin target's map function.
1549 */
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001550static int thin_bio_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001551{
1552 int r;
1553 struct thin_c *tc = ti->private;
1554 dm_block_t block = get_bio_block(tc, bio);
1555 struct dm_thin_device *td = tc->td;
1556 struct dm_thin_lookup_result result;
Joe Thornber025b9682013-03-01 22:45:50 +00001557 struct dm_bio_prison_cell cell1, cell2;
1558 struct dm_bio_prison_cell *cell_result;
Joe Thornbere8088072012-12-21 20:23:31 +00001559 struct dm_cell_key key;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001560
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00001561 thin_hook_bio(tc, bio);
Joe Thornbere49e5822012-07-27 15:08:16 +01001562
1563 if (get_pool_mode(tc->pool) == PM_FAIL) {
1564 bio_io_error(bio);
1565 return DM_MAPIO_SUBMITTED;
1566 }
1567
Joe Thornber104655f2012-03-28 18:41:28 +01001568 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00001569 thin_defer_bio(tc, bio);
1570 return DM_MAPIO_SUBMITTED;
1571 }
1572
1573 r = dm_thin_find_block(td, block, 0, &result);
1574
1575 /*
1576 * Note that we defer readahead too.
1577 */
1578 switch (r) {
1579 case 0:
1580 if (unlikely(result.shared)) {
1581 /*
1582 * We have a race condition here between the
1583 * result.shared value returned by the lookup and
1584 * snapshot creation, which may cause new
1585 * sharing.
1586 *
1587 * To avoid this always quiesce the origin before
1588 * taking the snap. You want to do this anyway to
1589 * ensure a consistent application view
1590 * (i.e. lockfs).
1591 *
1592 * More distant ancestors are irrelevant. The
1593 * shared flag will be set in their case.
1594 */
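			/*
			 * Illustrative userland sequence for the above
			 * (device names and ids are examples only):
			 *
			 *	dmsetup suspend /dev/mapper/thin
			 *	dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
			 *	dmsetup resume /dev/mapper/thin
			 *
			 * The suspend flushes and quiesces io to the origin
			 * before the snapshot metadata is taken.
			 */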
1595 thin_defer_bio(tc, bio);
Joe Thornbere8088072012-12-21 20:23:31 +00001596 return DM_MAPIO_SUBMITTED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001597 }
Joe Thornbere8088072012-12-21 20:23:31 +00001598
1599 build_virtual_key(tc->td, block, &key);
Joe Thornber025b9682013-03-01 22:45:50 +00001600 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
Joe Thornbere8088072012-12-21 20:23:31 +00001601 return DM_MAPIO_SUBMITTED;
1602
1603 build_data_key(tc->td, result.block, &key);
Joe Thornber025b9682013-03-01 22:45:50 +00001604 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1605 cell_defer_no_holder_no_free(tc, &cell1);
Joe Thornbere8088072012-12-21 20:23:31 +00001606 return DM_MAPIO_SUBMITTED;
1607 }
1608
1609 inc_all_io_entry(tc->pool, bio);
Joe Thornber025b9682013-03-01 22:45:50 +00001610 cell_defer_no_holder_no_free(tc, &cell2);
1611 cell_defer_no_holder_no_free(tc, &cell1);
Joe Thornbere8088072012-12-21 20:23:31 +00001612
1613 remap(tc, bio, result.block);
1614 return DM_MAPIO_REMAPPED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001615
1616 case -ENODATA:
Joe Thornbere49e5822012-07-27 15:08:16 +01001617 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1618 /*
1619 * This block isn't provisioned, and we have no way
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001620 * of doing so.
Joe Thornbere49e5822012-07-27 15:08:16 +01001621 */
Mike Snitzer8c0f0e82013-12-05 15:47:24 -05001622 handle_unserviceable_bio(tc->pool, bio);
Joe Thornber2aab3852012-12-21 20:23:33 +00001623 return DM_MAPIO_SUBMITTED;
Joe Thornbere49e5822012-07-27 15:08:16 +01001624 }
1625 /* fall through */
1626
1627 case -EWOULDBLOCK:
Joe Thornber991d9fa2011-10-31 20:21:18 +00001628 /*
1629 * In future, the failed dm_thin_find_block above could
1630 * provide the hint to load the metadata into cache.
1631 */
Joe Thornber991d9fa2011-10-31 20:21:18 +00001632 thin_defer_bio(tc, bio);
Joe Thornber2aab3852012-12-21 20:23:33 +00001633 return DM_MAPIO_SUBMITTED;
Joe Thornbere49e5822012-07-27 15:08:16 +01001634
1635 default:
1636 /*
1637 * Must always call bio_io_error on failure.
1638 * dm_thin_find_block can fail with -EINVAL if the
1639 * pool is switched to fail-io mode.
1640 */
1641 bio_io_error(bio);
Joe Thornber2aab3852012-12-21 20:23:33 +00001642 return DM_MAPIO_SUBMITTED;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001643 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001644}
1645
1646static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1647{
1648 int r;
1649 unsigned long flags;
1650 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1651
1652 spin_lock_irqsave(&pt->pool->lock, flags);
1653 r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1654 spin_unlock_irqrestore(&pt->pool->lock, flags);
1655
1656 if (!r) {
1657 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1658 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1659 }
1660
1661 return r;
1662}
1663
1664static void __requeue_bios(struct pool *pool)
1665{
1666 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1667 bio_list_init(&pool->retry_on_resume_list);
1668}
1669
1670/*----------------------------------------------------------------
1671 * Binding of control targets to a pool object
1672 *--------------------------------------------------------------*/
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001673static bool data_dev_supports_discard(struct pool_c *pt)
1674{
1675 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1676
1677 return q && blk_queue_discard(q);
1678}
1679
Joe Thornber58051b92013-03-20 17:21:25 +00001680static bool is_factor(sector_t block_size, uint32_t n)
1681{
1682 return !sector_div(block_size, n);
1683}
1684
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001685/*
1686 * If discard_passdown was enabled verify that the data device
Mike Snitzer0424caa2012-09-26 23:45:47 +01001687 * supports discards. Disable discard_passdown if not.
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001688 */
Mike Snitzer0424caa2012-09-26 23:45:47 +01001689static void disable_passdown_if_not_supported(struct pool_c *pt)
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001690{
Mike Snitzer0424caa2012-09-26 23:45:47 +01001691 struct pool *pool = pt->pool;
1692 struct block_device *data_bdev = pt->data_dev->bdev;
1693 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1694 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1695 const char *reason = NULL;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001696 char buf[BDEVNAME_SIZE];
1697
Mike Snitzer0424caa2012-09-26 23:45:47 +01001698 if (!pt->adjusted_pf.discard_passdown)
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001699 return;
1700
Mike Snitzer0424caa2012-09-26 23:45:47 +01001701 if (!data_dev_supports_discard(pt))
1702 reason = "discard unsupported";
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001703
Mike Snitzer0424caa2012-09-26 23:45:47 +01001704 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1705 reason = "max discard sectors smaller than a block";
1706
1707 else if (data_limits->discard_granularity > block_size)
1708 reason = "discard granularity larger than a block";
1709
Joe Thornber58051b92013-03-20 17:21:25 +00001710 else if (!is_factor(block_size, data_limits->discard_granularity))
Mike Snitzer0424caa2012-09-26 23:45:47 +01001711 reason = "discard granularity not a factor of block size";
1712
1713 if (reason) {
1714 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1715 pt->adjusted_pf.discard_passdown = false;
1716 }
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001717}
1718
Joe Thornber991d9fa2011-10-31 20:21:18 +00001719static int bind_control_target(struct pool *pool, struct dm_target *ti)
1720{
1721 struct pool_c *pt = ti->private;
1722
Joe Thornbere49e5822012-07-27 15:08:16 +01001723 /*
Joe Thornber9b7aaa62013-12-04 16:58:19 -05001724 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
Joe Thornbere49e5822012-07-27 15:08:16 +01001725 */
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05001726 enum pool_mode old_mode = get_pool_mode(pool);
Mike Snitzer0424caa2012-09-26 23:45:47 +01001727 enum pool_mode new_mode = pt->adjusted_pf.mode;
Joe Thornbere49e5822012-07-27 15:08:16 +01001728
Joe Thornber9b7aaa62013-12-04 16:58:19 -05001729 /*
Mike Snitzer8b64e882013-12-20 14:27:28 -05001730 * Don't change the pool's mode until set_pool_mode() below.
1731 * Otherwise the pool's process_* function pointers may
1732 * not match the desired pool mode.
1733 */
1734 pt->adjusted_pf.mode = old_mode;
1735
1736 pool->ti = ti;
1737 pool->pf = pt->adjusted_pf;
1738 pool->low_water_blocks = pt->low_water_blocks;
1739
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001740 set_pool_mode(pool, new_mode);
Mike Snitzerf4026932012-05-19 01:01:01 +01001741
Joe Thornber991d9fa2011-10-31 20:21:18 +00001742 return 0;
1743}
1744
1745static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1746{
1747 if (pool->ti == ti)
1748 pool->ti = NULL;
1749}
1750
1751/*----------------------------------------------------------------
1752 * Pool creation
1753 *--------------------------------------------------------------*/
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001754/* Initialize pool features. */
1755static void pool_features_init(struct pool_features *pf)
1756{
Joe Thornbere49e5822012-07-27 15:08:16 +01001757 pf->mode = PM_WRITE;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001758 pf->zero_new_blocks = true;
1759 pf->discard_enabled = true;
1760 pf->discard_passdown = true;
Mike Snitzer787a996c2013-12-06 16:21:43 -05001761 pf->error_if_no_space = false;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001762}
1763
Joe Thornber991d9fa2011-10-31 20:21:18 +00001764static void __pool_destroy(struct pool *pool)
1765{
1766 __pool_table_remove(pool);
1767
1768 if (dm_pool_metadata_close(pool->pmd) < 0)
1769 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1770
Mike Snitzer44feb382012-10-12 21:02:10 +01001771 dm_bio_prison_destroy(pool->prison);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001772 dm_kcopyd_client_destroy(pool->copier);
1773
1774 if (pool->wq)
1775 destroy_workqueue(pool->wq);
1776
1777 if (pool->next_mapping)
1778 mempool_free(pool->next_mapping, pool->mapping_pool);
1779 mempool_destroy(pool->mapping_pool);
Mike Snitzer44feb382012-10-12 21:02:10 +01001780 dm_deferred_set_destroy(pool->shared_read_ds);
1781 dm_deferred_set_destroy(pool->all_io_ds);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001782 kfree(pool);
1783}
1784
Mike Snitzera24c2562012-06-03 00:30:00 +01001785static struct kmem_cache *_new_mapping_cache;
Mike Snitzera24c2562012-06-03 00:30:00 +01001786
Joe Thornber991d9fa2011-10-31 20:21:18 +00001787static struct pool *pool_create(struct mapped_device *pool_md,
1788 struct block_device *metadata_dev,
Joe Thornbere49e5822012-07-27 15:08:16 +01001789 unsigned long block_size,
1790 int read_only, char **error)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001791{
1792 int r;
1793 void *err_p;
1794 struct pool *pool;
1795 struct dm_pool_metadata *pmd;
Joe Thornbere49e5822012-07-27 15:08:16 +01001796 bool format_device = read_only ? false : true;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001797
Joe Thornbere49e5822012-07-27 15:08:16 +01001798 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001799 if (IS_ERR(pmd)) {
1800 *error = "Error creating metadata object";
1801 return (struct pool *)pmd;
1802 }
1803
1804 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1805 if (!pool) {
1806 *error = "Error allocating memory for pool";
1807 err_p = ERR_PTR(-ENOMEM);
1808 goto bad_pool;
1809 }
1810
1811 pool->pmd = pmd;
1812 pool->sectors_per_block = block_size;
Mikulas Patockaf9a8e0c2012-07-27 15:08:03 +01001813 if (block_size & (block_size - 1))
1814 pool->sectors_per_block_shift = -1;
1815 else
1816 pool->sectors_per_block_shift = __ffs(block_size);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001817 pool->low_water_blocks = 0;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001818 pool_features_init(&pool->pf);
Mike Snitzer44feb382012-10-12 21:02:10 +01001819 pool->prison = dm_bio_prison_create(PRISON_CELLS);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001820 if (!pool->prison) {
1821 *error = "Error creating pool's bio prison";
1822 err_p = ERR_PTR(-ENOMEM);
1823 goto bad_prison;
1824 }
1825
Mikulas Patockadf5d2e92013-03-01 22:45:49 +00001826 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001827 if (IS_ERR(pool->copier)) {
1828 r = PTR_ERR(pool->copier);
1829 *error = "Error creating pool's kcopyd client";
1830 err_p = ERR_PTR(r);
1831 goto bad_kcopyd_client;
1832 }
1833
1834 /*
1835 * Create singlethreaded workqueue that will service all devices
1836 * that use this metadata.
1837 */
1838 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1839 if (!pool->wq) {
1840 *error = "Error creating pool's workqueue";
1841 err_p = ERR_PTR(-ENOMEM);
1842 goto bad_wq;
1843 }
1844
1845 INIT_WORK(&pool->worker, do_worker);
Joe Thornber905e51b2012-03-28 18:41:27 +01001846 INIT_DELAYED_WORK(&pool->waker, do_waker);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001847 spin_lock_init(&pool->lock);
1848 bio_list_init(&pool->deferred_bios);
1849 bio_list_init(&pool->deferred_flush_bios);
1850 INIT_LIST_HEAD(&pool->prepared_mappings);
Joe Thornber104655f2012-03-28 18:41:28 +01001851 INIT_LIST_HEAD(&pool->prepared_discards);
Joe Thornber88a66212013-12-04 20:16:12 -05001852 pool->low_water_triggered = false;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001853 bio_list_init(&pool->retry_on_resume_list);
Mike Snitzer44feb382012-10-12 21:02:10 +01001854
1855 pool->shared_read_ds = dm_deferred_set_create();
1856 if (!pool->shared_read_ds) {
1857 *error = "Error creating pool's shared read deferred set";
1858 err_p = ERR_PTR(-ENOMEM);
1859 goto bad_shared_read_ds;
1860 }
1861
1862 pool->all_io_ds = dm_deferred_set_create();
1863 if (!pool->all_io_ds) {
1864 *error = "Error creating pool's all io deferred set";
1865 err_p = ERR_PTR(-ENOMEM);
1866 goto bad_all_io_ds;
1867 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001868
1869 pool->next_mapping = NULL;
Mike Snitzera24c2562012-06-03 00:30:00 +01001870 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1871 _new_mapping_cache);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001872 if (!pool->mapping_pool) {
1873 *error = "Error creating pool's mapping mempool";
1874 err_p = ERR_PTR(-ENOMEM);
1875 goto bad_mapping_pool;
1876 }
1877
Joe Thornber991d9fa2011-10-31 20:21:18 +00001878 pool->ref_count = 1;
Joe Thornber905e51b2012-03-28 18:41:27 +01001879 pool->last_commit_jiffies = jiffies;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001880 pool->pool_md = pool_md;
1881 pool->md_dev = metadata_dev;
1882 __pool_table_insert(pool);
1883
1884 return pool;
1885
Joe Thornber991d9fa2011-10-31 20:21:18 +00001886bad_mapping_pool:
Mike Snitzer44feb382012-10-12 21:02:10 +01001887 dm_deferred_set_destroy(pool->all_io_ds);
1888bad_all_io_ds:
1889 dm_deferred_set_destroy(pool->shared_read_ds);
1890bad_shared_read_ds:
Joe Thornber991d9fa2011-10-31 20:21:18 +00001891 destroy_workqueue(pool->wq);
1892bad_wq:
1893 dm_kcopyd_client_destroy(pool->copier);
1894bad_kcopyd_client:
Mike Snitzer44feb382012-10-12 21:02:10 +01001895 dm_bio_prison_destroy(pool->prison);
Joe Thornber991d9fa2011-10-31 20:21:18 +00001896bad_prison:
1897 kfree(pool);
1898bad_pool:
1899 if (dm_pool_metadata_close(pmd))
1900 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1901
1902 return err_p;
1903}
1904
1905static void __pool_inc(struct pool *pool)
1906{
1907 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1908 pool->ref_count++;
1909}
1910
1911static void __pool_dec(struct pool *pool)
1912{
1913 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1914 BUG_ON(!pool->ref_count);
1915 if (!--pool->ref_count)
1916 __pool_destroy(pool);
1917}
1918
1919static struct pool *__pool_find(struct mapped_device *pool_md,
1920 struct block_device *metadata_dev,
Joe Thornbere49e5822012-07-27 15:08:16 +01001921 unsigned long block_size, int read_only,
1922 char **error, int *created)
Joe Thornber991d9fa2011-10-31 20:21:18 +00001923{
1924 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1925
1926 if (pool) {
Mike Snitzerf09996c2012-07-27 15:07:59 +01001927 if (pool->pool_md != pool_md) {
1928 *error = "metadata device already in use by a pool";
Joe Thornber991d9fa2011-10-31 20:21:18 +00001929 return ERR_PTR(-EBUSY);
Mike Snitzerf09996c2012-07-27 15:07:59 +01001930 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001931 __pool_inc(pool);
1932
1933 } else {
1934 pool = __pool_table_lookup(pool_md);
1935 if (pool) {
Mike Snitzerf09996c2012-07-27 15:07:59 +01001936 if (pool->md_dev != metadata_dev) {
1937 *error = "different pool cannot replace a pool";
Joe Thornber991d9fa2011-10-31 20:21:18 +00001938 return ERR_PTR(-EINVAL);
Mike Snitzerf09996c2012-07-27 15:07:59 +01001939 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001940 __pool_inc(pool);
1941
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001942 } else {
Joe Thornbere49e5822012-07-27 15:08:16 +01001943 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
Joe Thornber67e2e2b2012-03-28 18:41:29 +01001944 *created = 1;
1945 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00001946 }
1947
1948 return pool;
1949}
1950
1951/*----------------------------------------------------------------
1952 * Pool target methods
1953 *--------------------------------------------------------------*/
1954static void pool_dtr(struct dm_target *ti)
1955{
1956 struct pool_c *pt = ti->private;
1957
1958 mutex_lock(&dm_thin_pool_table.mutex);
1959
1960 unbind_control_target(pt->pool, ti);
1961 __pool_dec(pt->pool);
1962 dm_put_device(ti, pt->metadata_dev);
1963 dm_put_device(ti, pt->data_dev);
1964 kfree(pt);
1965
1966 mutex_unlock(&dm_thin_pool_table.mutex);
1967}
1968
Joe Thornber991d9fa2011-10-31 20:21:18 +00001969static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1970 struct dm_target *ti)
1971{
1972 int r;
1973 unsigned argc;
1974 const char *arg_name;
1975
1976 static struct dm_arg _args[] = {
Mike Snitzer74aa45c2014-01-15 19:07:58 -05001977 {0, 4, "Invalid number of pool feature arguments"},
Joe Thornber991d9fa2011-10-31 20:21:18 +00001978 };
1979
1980 /*
1981 * No feature arguments supplied.
1982 */
1983 if (!as->argc)
1984 return 0;
1985
1986 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1987 if (r)
1988 return -EINVAL;
1989
1990 while (argc && !r) {
1991 arg_name = dm_shift_arg(as);
1992 argc--;
1993
Joe Thornbere49e5822012-07-27 15:08:16 +01001994 if (!strcasecmp(arg_name, "skip_block_zeroing"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001995 pf->zero_new_blocks = false;
Joe Thornber991d9fa2011-10-31 20:21:18 +00001996
Joe Thornbere49e5822012-07-27 15:08:16 +01001997 else if (!strcasecmp(arg_name, "ignore_discard"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01001998 pf->discard_enabled = false;
Joe Thornbere49e5822012-07-27 15:08:16 +01001999
2000 else if (!strcasecmp(arg_name, "no_discard_passdown"))
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002001 pf->discard_passdown = false;
Joe Thornbere49e5822012-07-27 15:08:16 +01002002
2003 else if (!strcasecmp(arg_name, "read_only"))
2004 pf->mode = PM_READ_ONLY;
2005
Mike Snitzer787a996c2013-12-06 16:21:43 -05002006 else if (!strcasecmp(arg_name, "error_if_no_space"))
2007 pf->error_if_no_space = true;
2008
Joe Thornbere49e5822012-07-27 15:08:16 +01002009 else {
2010 ti->error = "Unrecognised pool feature requested";
2011 r = -EINVAL;
2012 break;
2013 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002014 }
2015
2016 return r;
2017}
2018
Joe Thornberac8c3f32013-05-10 14:37:21 +01002019static void metadata_low_callback(void *context)
2020{
2021 struct pool *pool = context;
2022
2023 DMWARN("%s: reached low water mark for metadata device: sending event.",
2024 dm_device_name(pool->pool_md));
2025
2026 dm_table_event(pool->ti->table);
2027}
2028
Mike Snitzer7d489352014-02-12 23:58:15 -05002029static sector_t get_dev_size(struct block_device *bdev)
Joe Thornberb17446d2013-05-10 14:37:18 +01002030{
Mike Snitzer7d489352014-02-12 23:58:15 -05002031 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2032}
2033
2034static void warn_if_metadata_device_too_big(struct block_device *bdev)
2035{
2036 sector_t metadata_dev_size = get_dev_size(bdev);
Joe Thornberb17446d2013-05-10 14:37:18 +01002037 char buffer[BDEVNAME_SIZE];
2038
Mike Snitzer7d489352014-02-12 23:58:15 -05002039 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
Joe Thornberb17446d2013-05-10 14:37:18 +01002040 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2041 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
Mike Snitzer7d489352014-02-12 23:58:15 -05002042}
2043
2044static sector_t get_metadata_dev_size(struct block_device *bdev)
2045{
2046 sector_t metadata_dev_size = get_dev_size(bdev);
2047
2048 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2049 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
Joe Thornberb17446d2013-05-10 14:37:18 +01002050
2051 return metadata_dev_size;
2052}
2053
Joe Thornber24347e92013-05-10 14:37:19 +01002054static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2055{
2056 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2057
Mike Snitzer7d489352014-02-12 23:58:15 -05002058 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
Joe Thornber24347e92013-05-10 14:37:19 +01002059
2060 return metadata_dev_size;
2061}
2062
Joe Thornber991d9fa2011-10-31 20:21:18 +00002063/*
Joe Thornberac8c3f32013-05-10 14:37:21 +01002064 * When a metadata threshold is crossed a dm event is triggered, and
2065 * userland should respond by growing the metadata device. We could let
2066 * userland set the threshold, like we do with the data threshold, but I'm
2067 * not sure they know enough to do this well.
2068 */
2069static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2070{
2071 /*
2072 * 4M is ample for all ops with the possible exception of thin
2073 * device deletion which is harmless if it fails (just retry the
2074 * delete after you've grown the device).
2075 */
2076 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2077 return min((dm_block_t)1024ULL /* 4M */, quarter);
2078}
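/*
 * Worked example (assuming 4KiB metadata blocks): the 1024 block cap above
 * corresponds to 4MiB of headroom.  Only a metadata device smaller than
 * 16MiB ends up using the quarter-of-device threshold, since that is then
 * the smaller of the two values.
 */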
2079
2080/*
Joe Thornber991d9fa2011-10-31 20:21:18 +00002081 * thin-pool <metadata dev> <data dev>
2082 * <data block size (sectors)>
2083 * <low water mark (blocks)>
2084 * [<#feature args> [<arg>]*]
2085 *
2086 * Optional feature arguments are:
2087 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002088 * ignore_discard: disable discard
2089 * no_discard_passdown: don't pass discards down to the data device
Mike Snitzer787a996c2013-12-06 16:21:43 -05002090 * read_only: Don't allow any changes to be made to the pool metadata.
2091 * error_if_no_space: error IOs, instead of queueing, if no space.
Joe Thornber991d9fa2011-10-31 20:21:18 +00002092 */
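/*
 * Example table line (illustrative; device paths and sizes are assumed):
 *
 *	dmsetup create pool --table \
 *	   "0 20971520 thin-pool /dev/sdc1 /dev/sdc2 128 32768 1 skip_block_zeroing"
 *
 * i.e. a 10GiB pool mapping, 64KiB (128 sector) data blocks, a low water
 * mark of 32768 blocks and one feature argument.
 */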
2093static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2094{
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002095 int r, pool_created = 0;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002096 struct pool_c *pt;
2097 struct pool *pool;
2098 struct pool_features pf;
2099 struct dm_arg_set as;
2100 struct dm_dev *data_dev;
2101 unsigned long block_size;
2102 dm_block_t low_water_blocks;
2103 struct dm_dev *metadata_dev;
Joe Thornber5d0db962013-05-10 14:37:19 +01002104 fmode_t metadata_mode;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002105
2106 /*
2107 * FIXME Remove validation from scope of lock.
2108 */
2109 mutex_lock(&dm_thin_pool_table.mutex);
2110
2111 if (argc < 4) {
2112 ti->error = "Invalid argument count";
2113 r = -EINVAL;
2114 goto out_unlock;
2115 }
Joe Thornber5d0db962013-05-10 14:37:19 +01002116
Joe Thornber991d9fa2011-10-31 20:21:18 +00002117 as.argc = argc;
2118 as.argv = argv;
2119
Joe Thornber5d0db962013-05-10 14:37:19 +01002120 /*
2121 * Set default pool features.
2122 */
2123 pool_features_init(&pf);
2124
2125 dm_consume_args(&as, 4);
2126 r = parse_pool_features(&as, &pf, ti);
2127 if (r)
2128 goto out_unlock;
2129
2130 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2131 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002132 if (r) {
2133 ti->error = "Error opening metadata block device";
2134 goto out_unlock;
2135 }
Mike Snitzer7d489352014-02-12 23:58:15 -05002136 warn_if_metadata_device_too_big(metadata_dev->bdev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002137
2138 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2139 if (r) {
2140 ti->error = "Error getting data device";
2141 goto out_metadata;
2142 }
2143
2144 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2145 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2146 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01002147 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00002148 ti->error = "Invalid block size";
2149 r = -EINVAL;
2150 goto out;
2151 }
2152
2153 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2154 ti->error = "Invalid low water mark";
2155 r = -EINVAL;
2156 goto out;
2157 }
2158
Joe Thornber991d9fa2011-10-31 20:21:18 +00002159 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2160 if (!pt) {
2161 r = -ENOMEM;
2162 goto out;
2163 }
2164
2165 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
Joe Thornbere49e5822012-07-27 15:08:16 +01002166 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002167 if (IS_ERR(pool)) {
2168 r = PTR_ERR(pool);
2169 goto out_free_pt;
2170 }
2171
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002172 /*
2173 * 'pool_created' reflects whether this is the first table load.
2174 * Top level discard support is not allowed to be changed after
2175 * initial load. This would require a pool reload to trigger thin
2176 * device changes.
2177 */
2178 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2179 ti->error = "Discard support cannot be disabled once enabled";
2180 r = -EINVAL;
2181 goto out_flags_changed;
2182 }
2183
Joe Thornber991d9fa2011-10-31 20:21:18 +00002184 pt->pool = pool;
2185 pt->ti = ti;
2186 pt->metadata_dev = metadata_dev;
2187 pt->data_dev = data_dev;
2188 pt->low_water_blocks = low_water_blocks;
Mike Snitzer0424caa2012-09-26 23:45:47 +01002189 pt->adjusted_pf = pt->requested_pf = pf;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002190 ti->num_flush_bios = 1;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002191
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002192 /*
2193 * Only need to enable discards if the pool should pass
2194 * them down to the data device. The thin device's discard
2195 * processing will cause mappings to be removed from the btree.
2196 */
Mike Snitzerb60ab992013-09-19 18:49:11 -04002197 ti->discard_zeroes_data_unsupported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002198 if (pf.discard_enabled && pf.discard_passdown) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002199 ti->num_discard_bios = 1;
Mike Snitzer9bc142d2012-09-26 23:45:46 +01002200
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002201 /*
2202 * Setting 'discards_supported' circumvents the normal
2203 * stacking of discard limits (this keeps the pool and
2204 * thin devices' discard limits consistent).
2205 */
Alasdair G Kergon0ac55482012-07-27 15:08:08 +01002206 ti->discards_supported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002207 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002208 ti->private = pt;
2209
Joe Thornberac8c3f32013-05-10 14:37:21 +01002210 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2211 calc_metadata_threshold(pt),
2212 metadata_low_callback,
2213 pool);
2214 if (r)
2215 goto out_free_pt;
2216
Joe Thornber991d9fa2011-10-31 20:21:18 +00002217 pt->callbacks.congested_fn = pool_is_congested;
2218 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2219
2220 mutex_unlock(&dm_thin_pool_table.mutex);
2221
2222 return 0;
2223
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002224out_flags_changed:
2225 __pool_dec(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002226out_free_pt:
2227 kfree(pt);
2228out:
2229 dm_put_device(ti, data_dev);
2230out_metadata:
2231 dm_put_device(ti, metadata_dev);
2232out_unlock:
2233 mutex_unlock(&dm_thin_pool_table.mutex);
2234
2235 return r;
2236}
2237
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002238static int pool_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002239{
2240 int r;
2241 struct pool_c *pt = ti->private;
2242 struct pool *pool = pt->pool;
2243 unsigned long flags;
2244
2245 /*
2246 * As this is a singleton target, ti->begin is always zero.
2247 */
2248 spin_lock_irqsave(&pool->lock, flags);
2249 bio->bi_bdev = pt->data_dev->bdev;
2250 r = DM_MAPIO_REMAPPED;
2251 spin_unlock_irqrestore(&pool->lock, flags);
2252
2253 return r;
2254}
2255
Joe Thornberb17446d2013-05-10 14:37:18 +01002256static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2257{
2258 int r;
2259 struct pool_c *pt = ti->private;
2260 struct pool *pool = pt->pool;
2261 sector_t data_size = ti->len;
2262 dm_block_t sb_data_size;
2263
2264 *need_commit = false;
2265
2266 (void) sector_div(data_size, pool->sectors_per_block);
2267
2268 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2269 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002270 DMERR("%s: failed to retrieve data device size",
2271 dm_device_name(pool->pool_md));
Joe Thornberb17446d2013-05-10 14:37:18 +01002272 return r;
2273 }
2274
2275 if (data_size < sb_data_size) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002276 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2277 dm_device_name(pool->pool_md),
Joe Thornberb17446d2013-05-10 14:37:18 +01002278 (unsigned long long)data_size, sb_data_size);
2279 return -EINVAL;
2280
2281 } else if (data_size > sb_data_size) {
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002282 if (dm_pool_metadata_needs_check(pool->pmd)) {
2283 DMERR("%s: unable to grow the data device until repaired.",
2284 dm_device_name(pool->pool_md));
2285 return 0;
2286 }
2287
Mike Snitzer6f7f51d2013-12-04 10:25:53 -05002288 if (sb_data_size)
2289 DMINFO("%s: growing the data device from %llu to %llu blocks",
2290 dm_device_name(pool->pool_md),
2291 sb_data_size, (unsigned long long)data_size);
Joe Thornberb17446d2013-05-10 14:37:18 +01002292 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2293 if (r) {
Joe Thornberb5330652013-12-04 19:51:33 -05002294 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
Joe Thornberb17446d2013-05-10 14:37:18 +01002295 return r;
2296 }
2297
2298 *need_commit = true;
2299 }
2300
2301 return 0;
2302}
2303
Joe Thornber24347e92013-05-10 14:37:19 +01002304static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2305{
2306 int r;
2307 struct pool_c *pt = ti->private;
2308 struct pool *pool = pt->pool;
2309 dm_block_t metadata_dev_size, sb_metadata_dev_size;
2310
2311 *need_commit = false;
2312
Alasdair G Kergon610bba82013-05-19 18:57:50 +01002313 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
Joe Thornber24347e92013-05-10 14:37:19 +01002314
2315 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2316 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002317 DMERR("%s: failed to retrieve metadata device size",
2318 dm_device_name(pool->pool_md));
Joe Thornber24347e92013-05-10 14:37:19 +01002319 return r;
2320 }
2321
2322 if (metadata_dev_size < sb_metadata_dev_size) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002323 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2324 dm_device_name(pool->pool_md),
Joe Thornber24347e92013-05-10 14:37:19 +01002325 metadata_dev_size, sb_metadata_dev_size);
2326 return -EINVAL;
2327
2328 } else if (metadata_dev_size > sb_metadata_dev_size) {
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002329 if (dm_pool_metadata_needs_check(pool->pmd)) {
2330 DMERR("%s: unable to grow the metadata device until repaired.",
2331 dm_device_name(pool->pool_md));
2332 return 0;
2333 }
2334
Mike Snitzer7d489352014-02-12 23:58:15 -05002335 warn_if_metadata_device_too_big(pool->md_dev);
Mike Snitzer6f7f51d2013-12-04 10:25:53 -05002336 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2337 dm_device_name(pool->pool_md),
2338 sb_metadata_dev_size, metadata_dev_size);
Joe Thornber24347e92013-05-10 14:37:19 +01002339 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2340 if (r) {
Joe Thornberb5330652013-12-04 19:51:33 -05002341 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
Joe Thornber24347e92013-05-10 14:37:19 +01002342 return r;
2343 }
2344
2345 *need_commit = true;
2346 }
2347
2348 return 0;
2349}
2350
Joe Thornber991d9fa2011-10-31 20:21:18 +00002351/*
2352 * Retrieves the number of blocks of the data device from
2353 * the superblock and compares it to the actual device size,
2354 * thus resizing the data device in case it has grown.
2355 *
2356 * This both copes with opening preallocated data devices in the ctr
2357 * being followed by a resume
2358 * -and-
2359 * calling the resume method individually after userspace has
2360 * grown the data device in reaction to a table event.
2361 */
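/*
 * Illustrative userland sequence for growing the data device (the table
 * values are examples only):
 *
 *	dmsetup suspend pool
 *	dmsetup reload pool --table "0 <new length> thin-pool <md> <data> 128 32768 0"
 *	dmsetup resume pool
 *
 * The preresume/resume path below then picks up the larger ti->len and
 * resizes the data space map to match.
 */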
2362static int pool_preresume(struct dm_target *ti)
2363{
2364 int r;
Joe Thornber24347e92013-05-10 14:37:19 +01002365 bool need_commit1, need_commit2;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002366 struct pool_c *pt = ti->private;
2367 struct pool *pool = pt->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002368
2369 /*
2370 * Take control of the pool object.
2371 */
2372 r = bind_control_target(pool, ti);
2373 if (r)
2374 return r;
2375
Joe Thornberb17446d2013-05-10 14:37:18 +01002376 r = maybe_resize_data_dev(ti, &need_commit1);
2377 if (r)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002378 return r;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002379
Joe Thornber24347e92013-05-10 14:37:19 +01002380 r = maybe_resize_metadata_dev(ti, &need_commit2);
2381 if (r)
2382 return r;
2383
2384 if (need_commit1 || need_commit2)
Joe Thornber020cc3b2013-12-04 15:05:36 -05002385 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002386
2387 return 0;
2388}
2389
2390static void pool_resume(struct dm_target *ti)
2391{
2392 struct pool_c *pt = ti->private;
2393 struct pool *pool = pt->pool;
2394 unsigned long flags;
2395
2396 spin_lock_irqsave(&pool->lock, flags);
Joe Thornber88a66212013-12-04 20:16:12 -05002397 pool->low_water_triggered = false;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002398 __requeue_bios(pool);
2399 spin_unlock_irqrestore(&pool->lock, flags);
2400
Joe Thornber905e51b2012-03-28 18:41:27 +01002401 do_waker(&pool->waker.work);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002402}
2403
2404static void pool_postsuspend(struct dm_target *ti)
2405{
Joe Thornber991d9fa2011-10-31 20:21:18 +00002406 struct pool_c *pt = ti->private;
2407 struct pool *pool = pt->pool;
2408
Joe Thornber905e51b2012-03-28 18:41:27 +01002409 cancel_delayed_work(&pool->waker);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002410 flush_workqueue(pool->wq);
Joe Thornber020cc3b2013-12-04 15:05:36 -05002411 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002412}
2413
2414static int check_arg_count(unsigned argc, unsigned args_required)
2415{
2416 if (argc != args_required) {
2417 DMWARN("Message received with %u arguments instead of %u.",
2418 argc, args_required);
2419 return -EINVAL;
2420 }
2421
2422 return 0;
2423}
2424
2425static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2426{
2427 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2428 *dev_id <= MAX_DEV_ID)
2429 return 0;
2430
2431 if (warning)
2432 DMWARN("Message received with invalid device id: %s", arg);
2433
2434 return -EINVAL;
2435}
2436
2437static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2438{
2439 dm_thin_id dev_id;
2440 int r;
2441
2442 r = check_arg_count(argc, 2);
2443 if (r)
2444 return r;
2445
2446 r = read_dev_id(argv[1], &dev_id, 1);
2447 if (r)
2448 return r;
2449
2450 r = dm_pool_create_thin(pool->pmd, dev_id);
2451 if (r) {
2452 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2453 argv[1]);
2454 return r;
2455 }
2456
2457 return 0;
2458}
2459
2460static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2461{
2462 dm_thin_id dev_id;
2463 dm_thin_id origin_dev_id;
2464 int r;
2465
2466 r = check_arg_count(argc, 3);
2467 if (r)
2468 return r;
2469
2470 r = read_dev_id(argv[1], &dev_id, 1);
2471 if (r)
2472 return r;
2473
2474 r = read_dev_id(argv[2], &origin_dev_id, 1);
2475 if (r)
2476 return r;
2477
2478 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2479 if (r) {
2480 DMWARN("Creation of new snapshot %s of device %s failed.",
2481 argv[1], argv[2]);
2482 return r;
2483 }
2484
2485 return 0;
2486}
2487
2488static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2489{
2490 dm_thin_id dev_id;
2491 int r;
2492
2493 r = check_arg_count(argc, 2);
2494 if (r)
2495 return r;
2496
2497 r = read_dev_id(argv[1], &dev_id, 1);
2498 if (r)
2499 return r;
2500
2501 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2502 if (r)
2503 DMWARN("Deletion of thin device %s failed.", argv[1]);
2504
2505 return r;
2506}
2507
2508static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2509{
2510 dm_thin_id old_id, new_id;
2511 int r;
2512
2513 r = check_arg_count(argc, 3);
2514 if (r)
2515 return r;
2516
2517 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2518 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2519 return -EINVAL;
2520 }
2521
2522 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2523 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2524 return -EINVAL;
2525 }
2526
2527 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2528 if (r) {
2529 DMWARN("Failed to change transaction id from %s to %s.",
2530 argv[1], argv[2]);
2531 return r;
2532 }
2533
2534 return 0;
2535}
2536
Joe Thornbercc8394d2012-06-03 00:30:01 +01002537static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2538{
2539 int r;
2540
2541 r = check_arg_count(argc, 1);
2542 if (r)
2543 return r;
2544
Joe Thornber020cc3b2013-12-04 15:05:36 -05002545 (void) commit(pool);
Joe Thornber0d200ae2012-07-03 12:55:31 +01002546
Joe Thornbercc8394d2012-06-03 00:30:01 +01002547 r = dm_pool_reserve_metadata_snap(pool->pmd);
2548 if (r)
2549 DMWARN("reserve_metadata_snap message failed.");
2550
2551 return r;
2552}
2553
2554static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2555{
2556 int r;
2557
2558 r = check_arg_count(argc, 1);
2559 if (r)
2560 return r;
2561
2562 r = dm_pool_release_metadata_snap(pool->pmd);
2563 if (r)
2564 DMWARN("release_metadata_snap message failed.");
2565
2566 return r;
2567}
2568
Joe Thornber991d9fa2011-10-31 20:21:18 +00002569/*
2570 * Messages supported:
2571 * create_thin <dev_id>
2572 * create_snap <dev_id> <origin_id>
2573 * delete <dev_id>
2574 * trim <dev_id> <new_size_in_sectors> (not implemented; rejected by pool_message() below)
2575 * set_transaction_id <current_trans_id> <new_trans_id>
Joe Thornbercc8394d2012-06-03 00:30:01 +01002576 * reserve_metadata_snap
2577 * release_metadata_snap
Joe Thornber991d9fa2011-10-31 20:21:18 +00002578 */
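/*
 * Example invocations (the pool device name and device ids are
 * illustrative):
 *
 *	dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *	dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *	dmsetup message /dev/mapper/pool 0 "delete 1"
 */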
2579static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2580{
2581 int r = -EINVAL;
2582 struct pool_c *pt = ti->private;
2583 struct pool *pool = pt->pool;
2584
2585 if (!strcasecmp(argv[0], "create_thin"))
2586 r = process_create_thin_mesg(argc, argv, pool);
2587
2588 else if (!strcasecmp(argv[0], "create_snap"))
2589 r = process_create_snap_mesg(argc, argv, pool);
2590
2591 else if (!strcasecmp(argv[0], "delete"))
2592 r = process_delete_mesg(argc, argv, pool);
2593
2594 else if (!strcasecmp(argv[0], "set_transaction_id"))
2595 r = process_set_transaction_id_mesg(argc, argv, pool);
2596
Joe Thornbercc8394d2012-06-03 00:30:01 +01002597 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2598 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2599
2600 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2601 r = process_release_metadata_snap_mesg(argc, argv, pool);
2602
Joe Thornber991d9fa2011-10-31 20:21:18 +00002603 else
2604 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2605
Joe Thornbere49e5822012-07-27 15:08:16 +01002606 if (!r)
Joe Thornber020cc3b2013-12-04 15:05:36 -05002607 (void) commit(pool);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002608
2609 return r;
2610}
2611
Joe Thornbere49e5822012-07-27 15:08:16 +01002612static void emit_flags(struct pool_features *pf, char *result,
2613 unsigned sz, unsigned maxlen)
2614{
2615 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
Mike Snitzer787a996c2013-12-06 16:21:43 -05002616 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2617 pf->error_if_no_space;
Joe Thornbere49e5822012-07-27 15:08:16 +01002618 DMEMIT("%u ", count);
2619
2620 if (!pf->zero_new_blocks)
2621 DMEMIT("skip_block_zeroing ");
2622
2623 if (!pf->discard_enabled)
2624 DMEMIT("ignore_discard ");
2625
2626 if (!pf->discard_passdown)
2627 DMEMIT("no_discard_passdown ");
2628
2629 if (pf->mode == PM_READ_ONLY)
2630 DMEMIT("read_only ");
Mike Snitzer787a996c2013-12-06 16:21:43 -05002631
2632 if (pf->error_if_no_space)
2633 DMEMIT("error_if_no_space ");
Joe Thornbere49e5822012-07-27 15:08:16 +01002634}
2635
Joe Thornber991d9fa2011-10-31 20:21:18 +00002636/*
2637 * Status line is:
2638 * <transaction id> <used metadata blocks>/<total metadata blocks>
2639 * <used data blocks>/<total data blocks> <held metadata root>
2640 */
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002641static void pool_status(struct dm_target *ti, status_type_t type,
2642 unsigned status_flags, char *result, unsigned maxlen)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002643{
Joe Thornbere49e5822012-07-27 15:08:16 +01002644 int r;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002645 unsigned sz = 0;
2646 uint64_t transaction_id;
2647 dm_block_t nr_free_blocks_data;
2648 dm_block_t nr_free_blocks_metadata;
2649 dm_block_t nr_blocks_data;
2650 dm_block_t nr_blocks_metadata;
2651 dm_block_t held_root;
2652 char buf[BDEVNAME_SIZE];
2653 char buf2[BDEVNAME_SIZE];
2654 struct pool_c *pt = ti->private;
2655 struct pool *pool = pt->pool;
2656
2657 switch (type) {
2658 case STATUSTYPE_INFO:
Joe Thornbere49e5822012-07-27 15:08:16 +01002659 if (get_pool_mode(pool) == PM_FAIL) {
2660 DMEMIT("Fail");
2661 break;
2662 }
2663
Alasdair G Kergon1f4e0ff2012-07-27 15:08:16 +01002664 /* Commit to ensure statistics aren't out-of-date */
2665 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
Joe Thornber020cc3b2013-12-04 15:05:36 -05002666 (void) commit(pool);
Alasdair G Kergon1f4e0ff2012-07-27 15:08:16 +01002667
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002668 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2669 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002670 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2671 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002672 goto err;
2673 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002674
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002675 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2676 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002677 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2678 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002679 goto err;
2680 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002681
2682 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002683 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002684 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2685 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002686 goto err;
2687 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002688
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002689 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2690 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002691 DMERR("%s: dm_pool_get_free_block_count returned %d",
2692 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002693 goto err;
2694 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002695
2696 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002697 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002698 DMERR("%s: dm_pool_get_data_dev_size returned %d",
2699 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002700 goto err;
2701 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002702
Joe Thornbercc8394d2012-06-03 00:30:01 +01002703 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002704 if (r) {
Mike Snitzer4fa59712013-08-21 17:30:40 -04002705 DMERR("%s: dm_pool_get_metadata_snap returned %d",
2706 dm_device_name(pool->pool_md), r);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002707 goto err;
2708 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002709
2710 DMEMIT("%llu %llu/%llu %llu/%llu ",
2711 (unsigned long long)transaction_id,
2712 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2713 (unsigned long long)nr_blocks_metadata,
2714 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2715 (unsigned long long)nr_blocks_data);
2716
2717 if (held_root)
Joe Thornbere49e5822012-07-27 15:08:16 +01002718 DMEMIT("%llu ", held_root);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002719 else
Joe Thornbere49e5822012-07-27 15:08:16 +01002720 DMEMIT("- ");
2721
2722 if (pool->pf.mode == PM_READ_ONLY)
2723 DMEMIT("ro ");
2724 else
2725 DMEMIT("rw ");
2726
Mike Snitzer018debe2012-12-21 20:23:32 +00002727 if (!pool->pf.discard_enabled)
Mike Snitzer787a996c2013-12-06 16:21:43 -05002728 DMEMIT("ignore_discard ");
Mike Snitzer018debe2012-12-21 20:23:32 +00002729 else if (pool->pf.discard_passdown)
Mike Snitzer787a996c2013-12-06 16:21:43 -05002730 DMEMIT("discard_passdown ");
Joe Thornbere49e5822012-07-27 15:08:16 +01002731 else
Mike Snitzer787a996c2013-12-06 16:21:43 -05002732 DMEMIT("no_discard_passdown ");
2733
2734 if (pool->pf.error_if_no_space)
2735 DMEMIT("error_if_no_space ");
2736 else
2737 DMEMIT("queue_if_no_space ");
Joe Thornber991d9fa2011-10-31 20:21:18 +00002738
2739 break;
2740
2741 case STATUSTYPE_TABLE:
2742 DMEMIT("%s %s %lu %llu ",
2743 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2744 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2745 (unsigned long)pool->sectors_per_block,
2746 (unsigned long long)pt->low_water_blocks);
Mike Snitzer0424caa2012-09-26 23:45:47 +01002747 emit_flags(&pt->requested_pf, result, sz, maxlen);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002748 break;
2749 }
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002750 return;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002751
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002752err:
2753 DMEMIT("Error");
Joe Thornber991d9fa2011-10-31 20:21:18 +00002754}
2755
2756static int pool_iterate_devices(struct dm_target *ti,
2757 iterate_devices_callout_fn fn, void *data)
2758{
2759 struct pool_c *pt = ti->private;
2760
2761 return fn(ti, pt->data_dev, 0, ti->len, data);
2762}
2763
2764static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2765 struct bio_vec *biovec, int max_size)
2766{
2767 struct pool_c *pt = ti->private;
2768 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2769
2770 if (!q->merge_bvec_fn)
2771 return max_size;
2772
2773 bvm->bi_bdev = pt->data_dev->bdev;
2774
2775 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2776}
2777
Mike Snitzer0424caa2012-09-26 23:45:47 +01002778static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
Joe Thornber104655f2012-03-28 18:41:28 +01002779{
Mike Snitzer0424caa2012-09-26 23:45:47 +01002780 struct pool *pool = pt->pool;
2781 struct queue_limits *data_limits;
2782
Joe Thornber104655f2012-03-28 18:41:28 +01002783 limits->max_discard_sectors = pool->sectors_per_block;
2784
2785 /*
Mike Snitzer0424caa2012-09-26 23:45:47 +01002786 * discard_granularity is just a hint, and not enforced.
Joe Thornber104655f2012-03-28 18:41:28 +01002787 */
Mike Snitzer0424caa2012-09-26 23:45:47 +01002788 if (pt->adjusted_pf.discard_passdown) {
2789 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2790 limits->discard_granularity = data_limits->discard_granularity;
Mike Snitzerf13945d2013-03-01 22:45:44 +00002791 } else
Mike Snitzer0424caa2012-09-26 23:45:47 +01002792 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
Joe Thornber104655f2012-03-28 18:41:28 +01002793}
2794
Joe Thornber991d9fa2011-10-31 20:21:18 +00002795static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2796{
2797 struct pool_c *pt = ti->private;
2798 struct pool *pool = pt->pool;
Mike Snitzer0cc67cd2013-08-20 15:02:41 -04002799 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002800
Mike Snitzer0cc67cd2013-08-20 15:02:41 -04002801 /*
2802	 * If the system-determined stacked limits are compatible with the
2803	 * pool's blocksize (io_opt is a multiple of it), do not override them.
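	 *
	 * For example (hypothetical figures), with a 1MiB pool block
	 * (2048 sectors) a stacked io_opt of 512KiB (1024 sectors) is not a
	 * multiple of the block size, so the hints are overridden below;
	 * a stacked io_opt of 2MiB (4096 sectors) would be left alone.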
2804 */
2805 if (io_opt_sectors < pool->sectors_per_block ||
2806 do_div(io_opt_sectors, pool->sectors_per_block)) {
2807 blk_limits_io_min(limits, 0);
2808 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2809 }
Mike Snitzer0424caa2012-09-26 23:45:47 +01002810
2811 /*
2812 * pt->adjusted_pf is a staging area for the actual features to use.
2813 * They get transferred to the live pool in bind_control_target()
2814 * called from pool_preresume().
2815 */
Mike Snitzerb60ab992013-09-19 18:49:11 -04002816 if (!pt->adjusted_pf.discard_enabled) {
2817 /*
2818		 * Must explicitly disallow stacking discard limits, otherwise the
2819		 * block layer will stack them if the pool's data device has support.
2820		 * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
2821		 * user to see that, so make sure all discard limits are set to 0.
2822 */
2823 limits->discard_granularity = 0;
Mike Snitzer0424caa2012-09-26 23:45:47 +01002824 return;
Mike Snitzerb60ab992013-09-19 18:49:11 -04002825 }
Mike Snitzer0424caa2012-09-26 23:45:47 +01002826
2827 disable_passdown_if_not_supported(pt);
2828
2829 set_discard_limits(pt, limits);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002830}
2831
2832static struct target_type pool_target = {
2833 .name = "thin-pool",
2834 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2835 DM_TARGET_IMMUTABLE,
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05002836 .version = {1, 11, 0},
Joe Thornber991d9fa2011-10-31 20:21:18 +00002837 .module = THIS_MODULE,
2838 .ctr = pool_ctr,
2839 .dtr = pool_dtr,
2840 .map = pool_map,
2841 .postsuspend = pool_postsuspend,
2842 .preresume = pool_preresume,
2843 .resume = pool_resume,
2844 .message = pool_message,
2845 .status = pool_status,
2846 .merge = pool_merge,
2847 .iterate_devices = pool_iterate_devices,
2848 .io_hints = pool_io_hints,
2849};
2850
2851/*----------------------------------------------------------------
2852 * Thin target methods
2853 *--------------------------------------------------------------*/
2854static void thin_dtr(struct dm_target *ti)
2855{
2856 struct thin_c *tc = ti->private;
2857
2858 mutex_lock(&dm_thin_pool_table.mutex);
2859
2860 __pool_dec(tc->pool);
2861 dm_pool_close_thin_device(tc->td);
2862 dm_put_device(ti, tc->pool_dev);
Joe Thornber2dd9c252012-03-28 18:41:28 +01002863 if (tc->origin_dev)
2864 dm_put_device(ti, tc->origin_dev);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002865 kfree(tc);
2866
2867 mutex_unlock(&dm_thin_pool_table.mutex);
2868}
2869
2870/*
2871 * Thin target parameters:
2872 *
Joe Thornber2dd9c252012-03-28 18:41:28 +01002873 * <pool_dev> <dev_id> [origin_dev]
Joe Thornber991d9fa2011-10-31 20:21:18 +00002874 *
2875 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
2876 * dev_id: the internal device identifier
Joe Thornber2dd9c252012-03-28 18:41:28 +01002877 * origin_dev: a device external to the pool that should act as the origin
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002878 *
2879 * If the pool device has discards disabled, they get disabled for the thin
2880 * device as well.
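 *
 * An example table line (hypothetical pool path, device id and size, shown
 * only to illustrate the argument order):
 *
 *   dmsetup create thin-vol --table "0 2097152 thin /dev/mapper/my_pool 1"
 *
 * which presents a 1GiB (2097152 sector) thin volume backed by internal
 * device id 1 of the pool at /dev/mapper/my_pool (the id must first have
 * been created via the pool's create_thin message).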
Joe Thornber991d9fa2011-10-31 20:21:18 +00002881 */
2882static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2883{
2884 int r;
2885 struct thin_c *tc;
Joe Thornber2dd9c252012-03-28 18:41:28 +01002886 struct dm_dev *pool_dev, *origin_dev;
Joe Thornber991d9fa2011-10-31 20:21:18 +00002887 struct mapped_device *pool_md;
2888
2889 mutex_lock(&dm_thin_pool_table.mutex);
2890
Joe Thornber2dd9c252012-03-28 18:41:28 +01002891 if (argc != 2 && argc != 3) {
Joe Thornber991d9fa2011-10-31 20:21:18 +00002892 ti->error = "Invalid argument count";
2893 r = -EINVAL;
2894 goto out_unlock;
2895 }
2896
2897 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2898 if (!tc) {
2899 ti->error = "Out of memory";
2900 r = -ENOMEM;
2901 goto out_unlock;
2902 }
2903
Joe Thornber2dd9c252012-03-28 18:41:28 +01002904 if (argc == 3) {
2905 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2906 if (r) {
2907 ti->error = "Error opening origin device";
2908 goto bad_origin_dev;
2909 }
2910 tc->origin_dev = origin_dev;
2911 }
2912
Joe Thornber991d9fa2011-10-31 20:21:18 +00002913 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2914 if (r) {
2915 ti->error = "Error opening pool device";
2916 goto bad_pool_dev;
2917 }
2918 tc->pool_dev = pool_dev;
2919
2920 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2921 ti->error = "Invalid device id";
2922 r = -EINVAL;
2923 goto bad_common;
2924 }
2925
2926 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2927 if (!pool_md) {
2928 ti->error = "Couldn't get pool mapped device";
2929 r = -EINVAL;
2930 goto bad_common;
2931 }
2932
2933 tc->pool = __pool_table_lookup(pool_md);
2934 if (!tc->pool) {
2935 ti->error = "Couldn't find pool object";
2936 r = -EINVAL;
2937 goto bad_pool_lookup;
2938 }
2939 __pool_inc(tc->pool);
2940
Joe Thornbere49e5822012-07-27 15:08:16 +01002941 if (get_pool_mode(tc->pool) == PM_FAIL) {
2942		ti->error = "Couldn't open thin device, pool is in fail mode";
Mike Snitzer1acacc02014-02-19 20:32:33 -05002943 r = -EINVAL;
Joe Thornbere49e5822012-07-27 15:08:16 +01002944 goto bad_thin_open;
2945 }
2946
Joe Thornber991d9fa2011-10-31 20:21:18 +00002947 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2948 if (r) {
2949 ti->error = "Couldn't open thin internal device";
2950 goto bad_thin_open;
2951 }
2952
Mike Snitzer542f9032012-07-27 15:08:00 +01002953 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2954 if (r)
Mike Snitzer1acacc02014-02-19 20:32:33 -05002955 goto bad_target_max_io_len;
Mike Snitzer542f9032012-07-27 15:08:00 +01002956
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002957 ti->num_flush_bios = 1;
Joe Thornber16ad3d12012-07-27 15:08:07 +01002958 ti->flush_supported = true;
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00002959 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002960
2961	/* If the pool supports discards, pass them on. */
Mike Snitzerb60ab992013-09-19 18:49:11 -04002962 ti->discard_zeroes_data_unsupported = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002963 if (tc->pool->pf.discard_enabled) {
Alasdair G Kergon0ac55482012-07-27 15:08:08 +01002964 ti->discards_supported = true;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002965 ti->num_discard_bios = 1;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002966 /* Discard bios must be split on a block boundary */
2967 ti->split_discard_bios = true;
Joe Thornber67e2e2b2012-03-28 18:41:29 +01002968 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00002969
2970 dm_put(pool_md);
2971
2972 mutex_unlock(&dm_thin_pool_table.mutex);
2973
2974 return 0;
2975
Mike Snitzer1acacc02014-02-19 20:32:33 -05002976bad_target_max_io_len:
2977 dm_pool_close_thin_device(tc->td);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002978bad_thin_open:
2979 __pool_dec(tc->pool);
2980bad_pool_lookup:
2981 dm_put(pool_md);
2982bad_common:
2983 dm_put_device(ti, tc->pool_dev);
2984bad_pool_dev:
Joe Thornber2dd9c252012-03-28 18:41:28 +01002985 if (tc->origin_dev)
2986 dm_put_device(ti, tc->origin_dev);
2987bad_origin_dev:
Joe Thornber991d9fa2011-10-31 20:21:18 +00002988 kfree(tc);
2989out_unlock:
2990 mutex_unlock(&dm_thin_pool_table.mutex);
2991
2992 return r;
2993}
2994
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002995static int thin_map(struct dm_target *ti, struct bio *bio)
Joe Thornber991d9fa2011-10-31 20:21:18 +00002996{
Kent Overstreet4f024f32013-10-11 15:44:27 -07002997 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
Joe Thornber991d9fa2011-10-31 20:21:18 +00002998
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002999 return thin_bio_map(ti, bio);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003000}
3001
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00003002static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
Joe Thornbereb2aa482012-03-28 18:41:28 +01003003{
3004 unsigned long flags;
Mikulas Patocka59c3d2c2012-12-21 20:23:40 +00003005 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
Joe Thornbereb2aa482012-03-28 18:41:28 +01003006 struct list_head work;
Mike Snitzera24c2562012-06-03 00:30:00 +01003007 struct dm_thin_new_mapping *m, *tmp;
Joe Thornbereb2aa482012-03-28 18:41:28 +01003008 struct pool *pool = h->tc->pool;
3009
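	/*
	 * Release the deferred-set entries taken when the bio was mapped.
	 * Mappings whose shared reads have now quiesced are marked quiesced
	 * and handed to the worker if fully prepared; completed all_io
	 * entries queue their discard mappings on pool->prepared_discards.
	 */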
3010 if (h->shared_read_entry) {
3011 INIT_LIST_HEAD(&work);
Mike Snitzer44feb382012-10-12 21:02:10 +01003012 dm_deferred_entry_dec(h->shared_read_entry, &work);
Joe Thornbereb2aa482012-03-28 18:41:28 +01003013
3014 spin_lock_irqsave(&pool->lock, flags);
3015 list_for_each_entry_safe(m, tmp, &work, list) {
3016 list_del(&m->list);
Mike Snitzer7f214662013-12-17 13:43:31 -05003017 m->quiesced = true;
Joe Thornbereb2aa482012-03-28 18:41:28 +01003018 __maybe_add_mapping(m);
3019 }
3020 spin_unlock_irqrestore(&pool->lock, flags);
3021 }
3022
Joe Thornber104655f2012-03-28 18:41:28 +01003023 if (h->all_io_entry) {
3024 INIT_LIST_HEAD(&work);
Mike Snitzer44feb382012-10-12 21:02:10 +01003025 dm_deferred_entry_dec(h->all_io_entry, &work);
Joe Thornber563af182012-12-21 20:23:31 +00003026 if (!list_empty(&work)) {
3027 spin_lock_irqsave(&pool->lock, flags);
3028 list_for_each_entry_safe(m, tmp, &work, list)
Mike Snitzerdaec3382013-12-11 14:01:20 -05003029 list_add_tail(&m->list, &pool->prepared_discards);
Joe Thornber563af182012-12-21 20:23:31 +00003030 spin_unlock_irqrestore(&pool->lock, flags);
3031 wake_worker(pool);
3032 }
Joe Thornber104655f2012-03-28 18:41:28 +01003033 }
3034
Joe Thornbereb2aa482012-03-28 18:41:28 +01003035 return 0;
3036}
3037
Joe Thornber991d9fa2011-10-31 20:21:18 +00003038static void thin_postsuspend(struct dm_target *ti)
3039{
3040 if (dm_noflush_suspending(ti))
3041 requeue_io((struct thin_c *)ti->private);
3042}
3043
3044/*
3045 * <nr mapped sectors> <highest mapped sector>
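 *
 * For example (hypothetical values), a thin device with 16 mapped blocks
 * and a 128-sector block size would report "2048 4095" if its highest
 * mapped block were block 31.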
3046 */
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003047static void thin_status(struct dm_target *ti, status_type_t type,
3048 unsigned status_flags, char *result, unsigned maxlen)
Joe Thornber991d9fa2011-10-31 20:21:18 +00003049{
3050 int r;
3051 ssize_t sz = 0;
3052 dm_block_t mapped, highest;
3053 char buf[BDEVNAME_SIZE];
3054 struct thin_c *tc = ti->private;
3055
Joe Thornbere49e5822012-07-27 15:08:16 +01003056 if (get_pool_mode(tc->pool) == PM_FAIL) {
3057 DMEMIT("Fail");
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003058 return;
Joe Thornbere49e5822012-07-27 15:08:16 +01003059 }
3060
Joe Thornber991d9fa2011-10-31 20:21:18 +00003061 if (!tc->td)
3062 DMEMIT("-");
3063 else {
3064 switch (type) {
3065 case STATUSTYPE_INFO:
3066 r = dm_thin_get_mapped_count(tc->td, &mapped);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003067 if (r) {
3068 DMERR("dm_thin_get_mapped_count returned %d", r);
3069 goto err;
3070 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003071
3072 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003073 if (r < 0) {
3074 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3075 goto err;
3076 }
Joe Thornber991d9fa2011-10-31 20:21:18 +00003077
3078 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3079 if (r)
3080 DMEMIT("%llu", ((highest + 1) *
3081 tc->pool->sectors_per_block) - 1);
3082 else
3083 DMEMIT("-");
3084 break;
3085
3086 case STATUSTYPE_TABLE:
3087 DMEMIT("%s %lu",
3088 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3089 (unsigned long) tc->dev_id);
Joe Thornber2dd9c252012-03-28 18:41:28 +01003090 if (tc->origin_dev)
3091 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
Joe Thornber991d9fa2011-10-31 20:21:18 +00003092 break;
3093 }
3094 }
3095
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003096 return;
3097
3098err:
3099 DMEMIT("Error");
Joe Thornber991d9fa2011-10-31 20:21:18 +00003100}
3101
3102static int thin_iterate_devices(struct dm_target *ti,
3103 iterate_devices_callout_fn fn, void *data)
3104{
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003105 sector_t blocks;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003106 struct thin_c *tc = ti->private;
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003107 struct pool *pool = tc->pool;
Joe Thornber991d9fa2011-10-31 20:21:18 +00003108
3109 /*
3110 * We can't call dm_pool_get_data_dev_size() since that blocks. So
3111 * we follow a more convoluted path through to the pool's target.
3112 */
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003113 if (!pool->ti)
Joe Thornber991d9fa2011-10-31 20:21:18 +00003114 return 0; /* nothing is bound */
3115
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003116 blocks = pool->ti->len;
3117 (void) sector_div(blocks, pool->sectors_per_block);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003118 if (blocks)
Mike Snitzer55f2b8b2012-07-27 15:08:02 +01003119 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003120
3121 return 0;
3122}
3123
Joe Thornber991d9fa2011-10-31 20:21:18 +00003124static struct target_type thin_target = {
3125 .name = "thin",
Mike Snitzer07f2b6e2014-02-14 11:58:41 -05003126 .version = {1, 11, 0},
Joe Thornber991d9fa2011-10-31 20:21:18 +00003127 .module = THIS_MODULE,
3128 .ctr = thin_ctr,
3129 .dtr = thin_dtr,
3130 .map = thin_map,
Joe Thornbereb2aa482012-03-28 18:41:28 +01003131 .end_io = thin_endio,
Joe Thornber991d9fa2011-10-31 20:21:18 +00003132 .postsuspend = thin_postsuspend,
3133 .status = thin_status,
3134 .iterate_devices = thin_iterate_devices,
Joe Thornber991d9fa2011-10-31 20:21:18 +00003135};
3136
3137/*----------------------------------------------------------------*/
3138
3139static int __init dm_thin_init(void)
3140{
3141 int r;
3142
3143 pool_table_init();
3144
3145 r = dm_register_target(&thin_target);
3146 if (r)
3147 return r;
3148
3149 r = dm_register_target(&pool_target);
3150 if (r)
Mike Snitzera24c2562012-06-03 00:30:00 +01003151 goto bad_pool_target;
3152
3153 r = -ENOMEM;
3154
Mike Snitzera24c2562012-06-03 00:30:00 +01003155 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3156 if (!_new_mapping_cache)
3157 goto bad_new_mapping_cache;
3158
Mike Snitzera24c2562012-06-03 00:30:00 +01003159 return 0;
3160
Mike Snitzera24c2562012-06-03 00:30:00 +01003161bad_new_mapping_cache:
Mike Snitzera24c2562012-06-03 00:30:00 +01003162 dm_unregister_target(&pool_target);
3163bad_pool_target:
3164 dm_unregister_target(&thin_target);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003165
3166 return r;
3167}
3168
3169static void dm_thin_exit(void)
3170{
3171 dm_unregister_target(&thin_target);
3172 dm_unregister_target(&pool_target);
Mike Snitzera24c2562012-06-03 00:30:00 +01003173
Mike Snitzera24c2562012-06-03 00:30:00 +01003174 kmem_cache_destroy(_new_mapping_cache);
Joe Thornber991d9fa2011-10-31 20:21:18 +00003175}
3176
3177module_init(dm_thin_init);
3178module_exit(dm_thin_exit);
3179
Alasdair G Kergon7cab8bf2012-05-12 01:43:19 +01003180MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
Joe Thornber991d9fa2011-10-31 20:21:18 +00003181MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3182MODULE_LICENSE("GPL");