/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)
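/*
 * Illustrative note (not part of the original source): the macro above
 * relies on pointer identity rather than strcmp(). That is only valid
 * because the snapshot-merge target type is registered with this exact
 * static array as its name, e.g. .name = dm_snapshot_merge_target_name
 * in its target_type.
 */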

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
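/*
 * Worked example (illustrative, not part of the original source): with
 * 16 buckets the hash is just the low four bits of the chunk number,
 * so DM_TRACKED_CHUNK_HASH(0x12345) == 0x12345 & 15 == 5, and chunks
 * 5, 21, 37, ... all land in the same bucket.
 */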

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct mutex lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /*
         * The snapshot overflowed because of a write to the snapshot device.
         * We don't have to invalidate the snapshot in this case, but we need
         * to prevent further writes.
         */
        int snapshot_overflowed;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        /* Protected by "lock" */
        sector_t exception_start_sequence;

        /* Protected by kcopyd single-threaded callback */
        sector_t exception_complete_sequence;

        /*
         * A list of pending exceptions that completed out of order.
         * Protected by kcopyd single-threaded callback.
         */
        struct list_head out_of_order_list;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        unsigned in_progress;
        wait_queue_head_t in_progress_wait;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *   => don't load the target; abort.
         * - Header does not have "valid" flag set
         *   => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *   => don't load the target; abort.
         *   (We can't use the intermediate origin state.)
         * - I/O error while merging
         *   => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
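/*
 * Illustrative usage (not part of the original source; the sysfs path
 * assumes the module is loaded as dm_snapshot): the threshold can be
 * tuned at runtime, e.g.
 *
 *      echo 1024 > /sys/module/dm_snapshot/parameters/snapshot_cow_threshold
 */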

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /* Set if there was a copying error. */
        int copy_error;

        /* A sequence number used for in-order completion. */
        sector_t exception_sequence;

        struct list_head out_of_order_entry;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
        struct dm_dev *dev;
        struct dm_target *ti;
        unsigned split_boundary;
        struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory for _origins");
                return -ENOMEM;
        }
        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);

        _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                              GFP_KERNEL);
        if (!_dm_origins) {
                DMERR("unable to allocate memory for _dm_origins");
                kfree(_origins);
                return -ENOMEM;
        }
        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_dm_origins + i);

        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
        kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct dm_origin *o;

        ol = &_dm_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->dev->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
        struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
        list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
        list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                mutex_lock(&s->lock);
                active = s->active;
                mutex_unlock(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
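/*
 * Worked example (illustrative, not part of the original source): the
 * "complete" table is created with hash_shift set to
 * DM_CHUNK_CONSECUTIVE_BITS (8 in current exception-store headers), so
 * chunks 0-255 all hash to bucket 0 and a linear run of remappings in
 * that range can be folded into one dm_exception with a consecutive
 * count. The "pending" table uses hash_shift == 0, hashing every chunk
 * individually.
 */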

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, gfp);
        if (!e && gfp == GFP_NOIO)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
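/*
 * Worked example (illustrative, not part of the original source): if
 * the table already holds e = { old_chunk 10, new_chunk 20 } with a
 * consecutive count of 2 (covering old 10-12 -> new 20-22), inserting
 * { old 13, new 23 } hits the "insert after" test above and just bumps
 * the count to 3, while inserting { old 9, new 19 } hits the "insert
 * before" test and slides the range down to { old 9, new 19, count 3 }.
 */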

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception(GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
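/*
 * Worked arithmetic (illustrative, not part of the original source):
 * on a 64-bit build struct list_head is two pointers (16 bytes), so
 * the cap works out to 2 * 1024 * 1024 / 16 = 131072 buckets.
 */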

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        max_buckets = calc_max_buckets();

        hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
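/*
 * Worked sizing example (illustrative, not part of the original
 * source): a 1 TiB COW device is 2^31 512-byte sectors; with 8 KiB
 * chunks (chunk_shift == 4) that gives 2^27 chunks, which
 * calc_max_buckets() caps at 131072 buckets for the "complete" table,
 * leaving 131072 >> 3 = 16384 buckets for the "pending" table.
 */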

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_atomic();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle. We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}
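/*
 * Worked example (illustrative, not part of the original source):
 * given e = { old 10-13 -> new 20-23 } (consecutive count 3), the
 * merge path removes old_chunk 13 first, leaving a count of 2;
 * removing chunk 10 instead would advance the start to old 11 ->
 * new 21. Only the two ends of a range are ever removed, which is
 * why a chunk from the middle triggers the DMERR above.
 */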

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        mutex_lock(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        mutex_unlock(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        mutex_lock(&s->lock);
                        s->merge_failed = 1;
                        mutex_unlock(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        mutex_lock(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        mutex_unlock(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        mutex_lock(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        mutex_unlock(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
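/*
 * Illustrative usage (not part of the original source; device names
 * and sizes are hypothetical): a 1 GiB (2097152-sector) origin with a
 * persistent exception store and 8 KiB (16-sector) chunks could be
 * set up with:
 *
 *      dmsetup create snap --table \
 *              "0 2097152 snapshot /dev/vg/origin /dev/vg/cow P 16"
 */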
1118static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1119{
1120 struct dm_snapshot *s;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001121 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 int r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001123 char *origin_path, *cow_path;
DingXiang4df2bf42016-02-02 12:29:18 +08001124 dev_t origin_dev, cow_dev;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001125 unsigned args_used, num_flush_bios = 1;
Mike Snitzer10b81062009-12-10 23:52:31 +00001126 fmode_t origin_mode = FMODE_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Mark McLoughlin4c7e3bf2006-10-03 01:15:25 -07001128 if (argc != 4) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001129 ti->error = "requires exactly 4 arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001131 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 }
1133
Mike Snitzer10b81062009-12-10 23:52:31 +00001134 if (dm_target_is_snapshot_merge(ti)) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001135 num_flush_bios = 2;
Mike Snitzer10b81062009-12-10 23:52:31 +00001136 origin_mode = FMODE_WRITE;
1137 }
1138
Kent Overstreet7ff8f212018-06-05 05:26:33 -04001139 s = kzalloc(sizeof(*s), GFP_KERNEL);
Jonathan Brassowfee19982009-04-02 19:55:34 +01001140 if (!s) {
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001141 ti->error = "Cannot allocate private snapshot structure";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 r = -ENOMEM;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001143 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 }
1145
Mikulas Patockac2411042010-08-12 04:13:51 +01001146 origin_path = argv[0];
1147 argv++;
1148 argc--;
1149
1150 r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1151 if (r) {
1152 ti->error = "Cannot get origin device";
1153 goto bad_origin;
1154 }
DingXiang4df2bf42016-02-02 12:29:18 +08001155 origin_dev = s->origin->bdev->bd_dev;
Mikulas Patockac2411042010-08-12 04:13:51 +01001156
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001157 cow_path = argv[0];
1158 argv++;
1159 argc--;
1160
DingXiang4df2bf42016-02-02 12:29:18 +08001161 cow_dev = dm_get_dev_t(cow_path);
1162 if (cow_dev && cow_dev == origin_dev) {
1163 ti->error = "COW device cannot be the same as origin device";
1164 r = -EINVAL;
1165 goto bad_cow;
1166 }
1167
Milan Broz024d37e2011-03-24 13:52:14 +00001168 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001169 if (r) {
1170 ti->error = "Cannot get COW device";
1171 goto bad_cow;
1172 }
1173
1174 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1175 if (r) {
1176 ti->error = "Couldn't create exception store";
1177 r = -EINVAL;
1178 goto bad_store;
1179 }
1180
1181 argv += args_used;
1182 argc -= args_used;
1183
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001184 s->ti = ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 s->valid = 1;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001186 s->snapshot_overflowed = 0;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001187 s->active = 0;
Mikulas Patocka879129d22008-10-30 13:33:16 +00001188 atomic_set(&s->pending_exceptions_count, 0);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001189 s->exception_start_sequence = 0;
1190 s->exception_complete_sequence = 0;
1191 INIT_LIST_HEAD(&s->out_of_order_list);
Mikulas Patocka0685a252017-11-23 16:15:43 -05001192 mutex_init(&s->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001193 INIT_LIST_HEAD(&s->list);
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001194 spin_lock_init(&s->pe_lock);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001195 s->state_bits = 0;
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00001196 s->merge_failed = 0;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001197 s->first_merging_chunk = 0;
1198 s->num_merging_chunks = 0;
1199 bio_list_init(&s->bios_queued_during_merge);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
1201 /* Allocate hash table for COW data */
Jonathan Brassowfee19982009-04-02 19:55:34 +01001202 if (init_hash_tables(s)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 ti->error = "Unable to allocate hash table space";
1204 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001205 goto bad_hash_tables;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 }
1207
Mikulas Patocka37524332019-10-02 06:15:53 -04001208 init_waitqueue_head(&s->in_progress_wait);
Nikos Tsironis69855b52018-10-31 17:53:08 -04001209
Mikulas Patockadf5d2e92013-03-01 22:45:49 +00001210 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
Mikulas Patockafa34ce72011-05-29 13:03:13 +01001211 if (IS_ERR(s->kcopyd_client)) {
1212 r = PTR_ERR(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 ti->error = "Could not create kcopyd client";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001214 goto bad_kcopyd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 }
1216
Mikulas Patocka92e86812008-07-21 12:00:35 +01001217 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1218 if (!s->pending_pool) {
1219 ti->error = "Could not allocate mempool for pending exceptions";
Wei Yongjun09e8b812013-05-10 14:37:15 +01001220 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001221 goto bad_pending_pool;
Mikulas Patocka92e86812008-07-21 12:00:35 +01001222 }
1223
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001224 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1225 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1226
1227 spin_lock_init(&s->tracked_chunk_lock);
1228
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001229 ti->private = s;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001230 ti->num_flush_bios = num_flush_bios;
Mike Snitzer30187e12016-01-31 13:28:26 -05001231 ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001232
1233 /* Add snapshot to the list of snapshots for this origin */
1234 /* Exceptions aren't triggered till snapshot_resume() is called */
1235 r = register_snapshot(s);
1236 if (r == -ENOMEM) {
1237 ti->error = "Snapshot origin struct allocation failed";
1238 goto bad_load_and_register;
1239 } else if (r < 0) {
1240 /* invalid handover, register_snapshot has set ti->error */
1241 goto bad_load_and_register;
1242 }
1243
1244 /*
1245 * Metadata must only be loaded into one table at once, so skip this
1246 * if metadata will be handed over during resume.
1247 * Chunk size will be set during the handover - set it to zero to
1248 * ensure it's ignored.
1249 */
1250 if (r > 0) {
1251 s->store->chunk_size = 0;
1252 return 0;
1253 }
1254
Jonathan Brassow493df712009-04-02 19:55:31 +01001255 r = s->store->type->read_metadata(s->store, dm_add_exception,
1256 (void *)s);
Milan Broz07641472007-07-12 17:28:13 +01001257 if (r < 0) {
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001258 ti->error = "Failed to read snapshot metadata";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001259 goto bad_read_metadata;
Milan Broz07641472007-07-12 17:28:13 +01001260 } else if (r > 0) {
1261 s->valid = 0;
1262 DMWARN("Snapshot is marked invalid.");
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001263 }
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001264
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001265 if (!s->store->chunk_size) {
1266 ti->error = "Chunk size not set";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001267 goto bad_read_metadata;
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001268 }
Mike Snitzer542f9032012-07-27 15:08:00 +01001269
1270 r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1271 if (r)
1272 goto bad_read_metadata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273
1274 return 0;
1275
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001276bad_read_metadata:
1277 unregister_snapshot(s);
1278
Jonathan Brassowfee19982009-04-02 19:55:34 +01001279bad_load_and_register:
Mikulas Patocka92e86812008-07-21 12:00:35 +01001280 mempool_destroy(s->pending_pool);
1281
Jonathan Brassowfee19982009-04-02 19:55:34 +01001282bad_pending_pool:
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001283 dm_kcopyd_client_destroy(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Jonathan Brassowfee19982009-04-02 19:55:34 +01001285bad_kcopyd:
Jon Brassow3510cb92009-12-10 23:52:11 +00001286 dm_exception_table_exit(&s->pending, pending_cache);
1287 dm_exception_table_exit(&s->complete, exception_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
Jonathan Brassowfee19982009-04-02 19:55:34 +01001289bad_hash_tables:
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001290 dm_exception_store_destroy(s->store);
1291
1292bad_store:
1293 dm_put_device(ti, s->cow);
1294
1295bad_cow:
Mikulas Patockac2411042010-08-12 04:13:51 +01001296 dm_put_device(ti, s->origin);
1297
1298bad_origin:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 kfree(s);
1300
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001301bad:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 return r;
1303}
1304
Milan Broz31c93a02006-12-08 02:41:11 -08001305static void __free_exceptions(struct dm_snapshot *s)
1306{
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001307 dm_kcopyd_client_destroy(s->kcopyd_client);
Milan Broz31c93a02006-12-08 02:41:11 -08001308 s->kcopyd_client = NULL;
1309
Jon Brassow3510cb92009-12-10 23:52:11 +00001310 dm_exception_table_exit(&s->pending, pending_cache);
1311 dm_exception_table_exit(&s->complete, exception_cache);
Milan Broz31c93a02006-12-08 02:41:11 -08001312}
1313
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001314static void __handover_exceptions(struct dm_snapshot *snap_src,
1315 struct dm_snapshot *snap_dest)
1316{
1317 union {
1318 struct dm_exception_table table_swap;
1319 struct dm_exception_store *store_swap;
1320 } u;
1321
1322 /*
1323 * Swap all snapshot context information between the two instances.
1324 */
1325 u.table_swap = snap_dest->complete;
1326 snap_dest->complete = snap_src->complete;
1327 snap_src->complete = u.table_swap;
1328
1329 u.store_swap = snap_dest->store;
1330 snap_dest->store = snap_src->store;
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001331 snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001332 snap_src->store = u.store_swap;
1333
1334 snap_dest->store->snap = snap_dest;
1335 snap_src->store->snap = snap_src;
1336
Mike Snitzer542f9032012-07-27 15:08:00 +01001337 snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001338 snap_dest->valid = snap_src->valid;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001339 snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001340
1341 /*
1342 * Set source invalid to ensure it receives no further I/O.
1343 */
1344 snap_src->valid = 0;
1345}
1346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347static void snapshot_dtr(struct dm_target *ti)
1348{
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001349#ifdef CONFIG_DM_DEBUG
1350 int i;
1351#endif
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001352 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001353 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001355 down_read(&_origins_lock);
1356 /* Check whether exception handover must be cancelled */
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001357 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001358 if (snap_src && snap_dest && (s == snap_src)) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001359 mutex_lock(&snap_dest->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001360 snap_dest->valid = 0;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001361 mutex_unlock(&snap_dest->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001362 DMERR("Cancelling snapshot handover.");
1363 }
1364 up_read(&_origins_lock);
1365
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001366 if (dm_target_is_snapshot_merge(ti))
1367 stop_merge(s);
1368
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001369 /* Prevent further origin writes from using this snapshot. */
1370 /* After this returns there can be no new kcopyd jobs. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 unregister_snapshot(s);
1372
Mikulas Patocka879129d22008-10-30 13:33:16 +00001373 while (atomic_read(&s->pending_exceptions_count))
Mikulas Patocka90fa1522009-01-06 03:04:54 +00001374 msleep(1);
Mikulas Patocka879129d22008-10-30 13:33:16 +00001375 /*
1376 * Ensure instructions in mempool_destroy aren't reordered
1377 * before atomic_read.
1378 */
1379 smp_mb();
1380
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001381#ifdef CONFIG_DM_DEBUG
1382 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1383 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1384#endif
1385
Milan Broz31c93a02006-12-08 02:41:11 -08001386 __free_exceptions(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
Mikulas Patocka92e86812008-07-21 12:00:35 +01001388 mempool_destroy(s->pending_pool);
1389
Jonathan Brassowfee19982009-04-02 19:55:34 +01001390 dm_exception_store_destroy(s->store);
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001391
Mikulas Patocka0685a252017-11-23 16:15:43 -05001392 mutex_destroy(&s->lock);
1393
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001394 dm_put_device(ti, s->cow);
1395
Mikulas Patockac2411042010-08-12 04:13:51 +01001396 dm_put_device(ti, s->origin);
1397
Mikulas Patocka37524332019-10-02 06:15:53 -04001398 WARN_ON(s->in_progress);
1399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 kfree(s);
1401}
1402
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001403static void account_start_copy(struct dm_snapshot *s)
1404{
Mikulas Patocka37524332019-10-02 06:15:53 -04001405 spin_lock(&s->in_progress_wait.lock);
1406 s->in_progress++;
1407 spin_unlock(&s->in_progress_wait.lock);
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001408}
1409
1410static void account_end_copy(struct dm_snapshot *s)
1411{
Mikulas Patocka37524332019-10-02 06:15:53 -04001412 spin_lock(&s->in_progress_wait.lock);
1413 BUG_ON(!s->in_progress);
1414 s->in_progress--;
1415 if (likely(s->in_progress <= cow_threshold) &&
1416 unlikely(waitqueue_active(&s->in_progress_wait)))
1417 wake_up_locked(&s->in_progress_wait);
1418 spin_unlock(&s->in_progress_wait.lock);
1419}
1420
1421static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
1422{
1423 if (unlikely(s->in_progress > cow_threshold)) {
1424 spin_lock(&s->in_progress_wait.lock);
1425 if (likely(s->in_progress > cow_threshold)) {
1426 /*
1427 * NOTE: this throttle doesn't account for whether
1428 * the caller is servicing an IO that will trigger a COW
1429 * so excess throttling may result for chunks not required
1430 * to be COW'd. But if cow_threshold was reached, extra
1431 * throttling is unlikely to negatively impact performance.
1432 */
1433 DECLARE_WAITQUEUE(wait, current);
1434 __add_wait_queue(&s->in_progress_wait, &wait);
1435 __set_current_state(TASK_UNINTERRUPTIBLE);
1436 spin_unlock(&s->in_progress_wait.lock);
1437 if (unlock_origins)
1438 up_read(&_origins_lock);
1439 io_schedule();
1440 remove_wait_queue(&s->in_progress_wait, &wait);
1441 return false;
1442 }
1443 spin_unlock(&s->in_progress_wait.lock);
1444 }
1445 return true;
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001446}
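
/*
 * The sleep above follows the standard waitqueue pattern for a
 * condition protected by the queue's own spinlock: re-test
 * in_progress under the lock, add ourselves and set
 * TASK_UNINTERRUPTIBLE before dropping it, then io_schedule().
 * account_end_copy() issues the matching wake_up_locked() once
 * in_progress falls back to cow_threshold.  A false return tells the
 * caller that it slept and must retry the test (do_origin() also
 * re-takes _origins_lock first, having dropped it here).
 */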
1447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448/*
1449 * Flush a list of buffers.
1450 */
1451static void flush_bios(struct bio *bio)
1452{
1453 struct bio *n;
1454
1455 while (bio) {
1456 n = bio->bi_next;
1457 bio->bi_next = NULL;
1458 generic_make_request(bio);
1459 bio = n;
1460 }
1461}
1462
Mikulas Patocka37524332019-10-02 06:15:53 -04001463static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001464
1465/*
1466 * Retry origin bios that were delayed behind a pending exception.
1467 */
1468static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1469{
1470 struct bio *n;
1471 int r;
1472
1473 while (bio) {
1474 n = bio->bi_next;
1475 bio->bi_next = NULL;
Mikulas Patocka37524332019-10-02 06:15:53 -04001476 r = do_origin(s->origin, bio, false);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001477 if (r == DM_MAPIO_REMAPPED)
1478 generic_make_request(bio);
1479 bio = n;
1480 }
1481}
1482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483/*
1484 * Error a list of buffers.
1485 */
1486static void error_bios(struct bio *bio)
1487{
1488 struct bio *n;
1489
1490 while (bio) {
1491 n = bio->bi_next;
1492 bio->bi_next = NULL;
NeilBrown6712ecf2007-09-27 12:47:43 +02001493 bio_io_error(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 bio = n;
1495 }
1496}
1497
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001498static void __invalidate_snapshot(struct dm_snapshot *s, int err)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001499{
1500 if (!s->valid)
1501 return;
1502
1503 if (err == -EIO)
1504 DMERR("Invalidating snapshot: Error reading/writing.");
1505 else if (err == -ENOMEM)
1506 DMERR("Invalidating snapshot: Unable to allocate exception.");
1507
Jonathan Brassow493df712009-04-02 19:55:31 +01001508 if (s->store->type->drop_snapshot)
1509 s->store->type->drop_snapshot(s->store);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001510
1511 s->valid = 0;
1512
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001513 dm_table_event(s->ti->table);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001514}
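
/*
 * After invalidation s->valid stays zero: snapshot_map() fails new
 * I/O with -EIO and snapshot_status() reports "Invalid", while
 * dm_table_event() raises a device event so userspace (dmeventd, for
 * instance) can react.
 */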
1515
Mikulas Patocka385277b2016-01-08 19:07:55 -05001516static void pending_complete(void *context, int success)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517{
Mikulas Patocka385277b2016-01-08 19:07:55 -05001518 struct dm_snap_pending_exception *pe = context;
Jon Brassow1d4989c2009-12-10 23:52:10 +00001519 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 struct dm_snapshot *s = pe->snap;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001521 struct bio *origin_bios = NULL;
1522 struct bio *snapshot_bios = NULL;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001523 struct bio *full_bio = NULL;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001524 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001526 if (!success) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 /* Read/write error - snapshot is unusable */
Mikulas Patocka0685a252017-11-23 16:15:43 -05001528 mutex_lock(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001529 __invalidate_snapshot(s, -EIO);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001530 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001531 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 }
1533
Mikulas Patocka119bc542014-01-13 19:13:36 -05001534 e = alloc_completed_exception(GFP_NOIO);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001535 if (!e) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001536 mutex_lock(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001537 __invalidate_snapshot(s, -ENOMEM);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001538 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001539 goto out;
1540 }
1541 *e = pe->e;
1542
Mikulas Patocka0685a252017-11-23 16:15:43 -05001543 mutex_lock(&s->lock);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001544 if (!s->valid) {
Jon Brassow3510cb92009-12-10 23:52:11 +00001545 free_completed_exception(e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001546 error = 1;
1547 goto out;
1548 }
1549
Mike Snitzer615d1eb2009-12-10 23:52:29 +00001550 /* Check for conflicting reads */
1551 __check_for_conflicting_io(s, pe->e.old_chunk);
Mikulas Patockaa8d41b52008-07-21 12:00:34 +01001552
1553 /*
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001554 * Add a proper exception, and remove the
1555 * in-flight exception from the list.
1556 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001557 dm_insert_exception(&s->complete, e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001558
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001559out:
Jon Brassow3510cb92009-12-10 23:52:11 +00001560 dm_remove_exception(&pe->e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001561 snapshot_bios = bio_list_get(&pe->snapshot_bios);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001562 origin_bios = bio_list_get(&pe->origin_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001563 full_bio = pe->full_bio;
Mikulas Patockafe3265b2015-11-25 16:03:31 -05001564 if (full_bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001565 full_bio->bi_end_io = pe->full_bio_end_io;
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001566 increment_pending_exceptions_done_count();
1567
Mikulas Patocka0685a252017-11-23 16:15:43 -05001568 mutex_unlock(&s->lock);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001569
1570 /* Submit any pending write bios */
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001571 if (error) {
1572 if (full_bio)
1573 bio_io_error(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001574 error_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001575 } else {
1576 if (full_bio)
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001577 bio_endio(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001578 flush_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001579 }
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001580
Mikulas Patocka515ad662009-12-10 23:52:30 +00001581 retry_origin_bios(s, origin_bios);
Mikulas Patocka22aa66a2015-02-17 14:34:00 -05001582
1583 free_pending_exception(pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584}
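
/*
 * Ordering note: the completed exception is inserted and the pending
 * one removed inside the same s->lock critical section, so a
 * concurrent snapshot_map() either finds the completed exception or
 * still sees the pending one to queue behind; the deferred bios are
 * only resubmitted after the lock is dropped.
 */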
1585
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001586static void complete_exception(struct dm_snap_pending_exception *pe)
1587{
1588 struct dm_snapshot *s = pe->snap;
1589
Mikulas Patocka385277b2016-01-08 19:07:55 -05001590 /* Update the metadata if we are persistent */
1591 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1592 pending_complete, pe);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001593}
1594
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595/*
1596 * Called when the copy I/O has finished. kcopyd actually runs
1597 * this code so don't block.
1598 */
Alasdair G Kergon4cdc1d12008-03-28 14:16:10 -07001599static void copy_callback(int read_err, unsigned long write_err, void *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001601 struct dm_snap_pending_exception *pe = context;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 struct dm_snapshot *s = pe->snap;
1603
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001604 pe->copy_error = read_err || write_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001606 if (pe->exception_sequence == s->exception_complete_sequence) {
1607 s->exception_complete_sequence++;
1608 complete_exception(pe);
1609
1610 while (!list_empty(&s->out_of_order_list)) {
1611 pe = list_entry(s->out_of_order_list.next,
1612 struct dm_snap_pending_exception, out_of_order_entry);
1613 if (pe->exception_sequence != s->exception_complete_sequence)
1614 break;
1615 s->exception_complete_sequence++;
1616 list_del(&pe->out_of_order_entry);
1617 complete_exception(pe);
1618 }
1619 } else {
1620 struct list_head *lh;
1621 struct dm_snap_pending_exception *pe2;
1622
1623 list_for_each_prev(lh, &s->out_of_order_list) {
1624 pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1625 if (pe2->exception_sequence < pe->exception_sequence)
1626 break;
1627 }
1628 list_add(&pe->out_of_order_entry, lh);
1629 }
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001630 account_end_copy(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631}
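
/*
 * A worked example of the resequencing above: suppose exceptions 5, 7
 * and 6 finish copying in that order while exception_complete_sequence
 * is 5.  Exception 5 completes at once, 7 is parked on
 * out_of_order_list, and when 6 arrives both 6 and 7 drain in order.
 * Commits therefore reach the exception store in allocation order even
 * though kcopyd finishes the copies out of order.
 */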
1632
1633/*
1634 * Dispatches the copy operation to kcopyd.
1635 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001636static void start_copy(struct dm_snap_pending_exception *pe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637{
1638 struct dm_snapshot *s = pe->snap;
Heinz Mauelshagen22a1ceb2008-04-24 21:43:17 +01001639 struct dm_io_region src, dest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 struct block_device *bdev = s->origin->bdev;
1641 sector_t dev_size;
1642
1643 dev_size = get_dev_size(bdev);
1644
1645 src.bdev = bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001646 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
Mikulas Patockadf96eee2009-10-16 23:18:17 +01001647 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001649 dest.bdev = s->cow->bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001650 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 dest.count = src.count;
1652
1653 /* Hand over to kcopyd */
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001654 account_start_copy(s);
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001655 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656}
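
/*
 * Note that src.count is clamped against dev_size, so the final chunk
 * of an origin whose size is not a whole number of chunks is still
 * copied without reading past the end of the device.
 */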
1657
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001658static void full_bio_end_io(struct bio *bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001659{
1660 void *callback_data = bio->bi_private;
1661
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001662 dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001663}
1664
1665static void start_full_bio(struct dm_snap_pending_exception *pe,
1666 struct bio *bio)
1667{
1668 struct dm_snapshot *s = pe->snap;
1669 void *callback_data;
1670
1671 pe->full_bio = bio;
1672 pe->full_bio_end_io = bio->bi_end_io;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001673
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001674 account_start_copy(s);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001675 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1676 copy_callback, pe);
1677
1678 bio->bi_end_io = full_bio_end_io;
1679 bio->bi_private = callback_data;
1680
1681 generic_make_request(bio);
1682}
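
/*
 * start_full_bio() is the whole-chunk write optimization used by
 * snapshot_map(): when a write covers an entire chunk there is nothing
 * to preserve from the origin, so the already-remapped bio is sent
 * straight to the COW device and its completion is routed into
 * kcopyd's callback machinery via dm_kcopyd_prepare_callback(),
 * avoiding the origin read altogether.
 */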
1683
Mikulas Patocka29138082009-04-02 19:55:25 +01001684static struct dm_snap_pending_exception *
1685__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1686{
Jon Brassow3510cb92009-12-10 23:52:11 +00001687 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001688
1689 if (!e)
1690 return NULL;
1691
1692 return container_of(e, struct dm_snap_pending_exception, e);
1693}
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695/*
1696 * Looks to see if this snapshot already has a pending exception
1697 * for this chunk, otherwise it allocates a new one and inserts
1698 * it into the pending table.
1699 *
1700 * NOTE: snap->lock must be held before calling
1701 * this.
1702 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001703static struct dm_snap_pending_exception *
Mikulas Patockac6621392009-04-02 19:55:25 +01001704__find_pending_exception(struct dm_snapshot *s,
1705 struct dm_snap_pending_exception *pe, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
Mikulas Patockac6621392009-04-02 19:55:25 +01001707 struct dm_snap_pending_exception *pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001708
Mikulas Patocka29138082009-04-02 19:55:25 +01001709 pe2 = __lookup_pending_exception(s, chunk);
1710 if (pe2) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001711 free_pending_exception(pe);
Mikulas Patocka29138082009-04-02 19:55:25 +01001712 return pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001713 }
1714
1715 pe->e.old_chunk = chunk;
1716 bio_list_init(&pe->origin_bios);
1717 bio_list_init(&pe->snapshot_bios);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001718 pe->started = 0;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001719 pe->full_bio = NULL;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001720
Jonathan Brassow493df712009-04-02 19:55:31 +01001721 if (s->store->type->prepare_exception(s->store, &pe->e)) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001722 free_pending_exception(pe);
1723 return NULL;
1724 }
1725
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001726 pe->exception_sequence = s->exception_start_sequence++;
1727
Jon Brassow3510cb92009-12-10 23:52:11 +00001728 dm_insert_exception(&s->pending, &pe->e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001729
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 return pe;
1731}
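
/*
 * The repeated __lookup_pending_exception() above exists because
 * callers allocate their candidate pe with s->lock dropped (the
 * mempool allocation can sleep) and revalidate afterwards; another
 * thread may have installed a pe for the same chunk in the meantime,
 * in which case the spare one is freed.
 */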
1732
Jon Brassow1d4989c2009-12-10 23:52:10 +00001733static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
Milan Brozd74f81f2008-02-08 02:11:27 +00001734 struct bio *bio, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735{
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001736 bio->bi_bdev = s->cow->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001737 bio->bi_iter.bi_sector =
1738 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1739 (chunk - e->old_chunk)) +
1740 (bio->bi_iter.bi_sector & s->store->chunk_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741}
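
/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * showing the remapping arithmetic above for the simple case of a
 * single-chunk exception (dm_chunk_number() strips the
 * consecutive-chunk count that ranged exceptions keep in the high
 * bits).  Chunk sizes are powers of two, as the exception store
 * enforces, so the offset within the chunk is a simple mask.  With a
 * 16-sector chunk, a bio at sector 35 (chunk 2, offset 3) whose chunk
 * moved to new_chunk 9 lands at COW sector 9 * 16 + 3 = 147.
 */
static inline sector_t example_remap_sector(sector_t bi_sector,
					    unsigned chunk_size,
					    chunk_t new_chunk)
{
	/* Base of the relocated chunk plus the offset inside it. */
	return new_chunk * chunk_size + (bi_sector & (chunk_size - 1));
}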
1742
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001743static int snapshot_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744{
Jon Brassow1d4989c2009-12-10 23:52:10 +00001745 struct dm_exception *e;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001746 struct dm_snapshot *s = ti->private;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001747 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 chunk_t chunk;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001749 struct dm_snap_pending_exception *pe = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
Mikulas Patockaee180262012-12-21 20:23:41 +00001751 init_tracked_chunk(bio);
1752
Jens Axboe1eff9d32016-08-05 15:35:16 -06001753 if (bio->bi_opf & REQ_PREFLUSH) {
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001754 bio->bi_bdev = s->cow->bdev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001755 return DM_MAPIO_REMAPPED;
1756 }
1757
Kent Overstreet4f024f32013-10-11 15:44:27 -07001758 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760 /* Full snapshots are not usable */
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001761 /* To get here the table must be live so s->active is always set. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 if (!s->valid)
Alasdair G Kergonf6a80ea2005-07-12 15:53:01 -07001763 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Mikulas Patocka37524332019-10-02 06:15:53 -04001765 if (bio_data_dir(bio) == WRITE) {
1766 while (unlikely(!wait_for_in_progress(s, false)))
1767 ; /* wait_for_in_progress() has slept */
1768 }
1769
Mikulas Patocka0685a252017-11-23 16:15:43 -05001770 mutex_lock(&s->lock);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001771
Christoph Hellwig70246282016-07-19 11:28:41 +02001772 if (!s->valid || (unlikely(s->snapshot_overflowed) &&
1773 bio_data_dir(bio) == WRITE)) {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001774 r = -EIO;
1775 goto out_unlock;
1776 }
1777
1778 /* If the block is already remapped - use that, else remap it */
Jon Brassow3510cb92009-12-10 23:52:11 +00001779 e = dm_lookup_exception(&s->complete, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001780 if (e) {
Milan Brozd74f81f2008-02-08 02:11:27 +00001781 remap_exception(s, e, bio, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001782 goto out_unlock;
1783 }
1784
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 /*
1786 * Write to snapshot - higher level takes care of RW/RO
1787 * flags so we should only get this if we are
1788 * writeable.
1789 */
Christoph Hellwig70246282016-07-19 11:28:41 +02001790 if (bio_data_dir(bio) == WRITE) {
Mikulas Patocka29138082009-04-02 19:55:25 +01001791 pe = __lookup_pending_exception(s, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001792 if (!pe) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001793 mutex_unlock(&s->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01001794 pe = alloc_pending_exception(s);
Mikulas Patocka0685a252017-11-23 16:15:43 -05001795 mutex_lock(&s->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01001796
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001797 if (!s->valid || s->snapshot_overflowed) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001798 free_pending_exception(pe);
1799 r = -EIO;
1800 goto out_unlock;
1801 }
1802
Jon Brassow3510cb92009-12-10 23:52:11 +00001803 e = dm_lookup_exception(&s->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001804 if (e) {
1805 free_pending_exception(pe);
1806 remap_exception(s, e, bio, chunk);
1807 goto out_unlock;
1808 }
1809
Mikulas Patockac6621392009-04-02 19:55:25 +01001810 pe = __find_pending_exception(s, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001811 if (!pe) {
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001812 if (s->store->userspace_supports_overflow) {
1813 s->snapshot_overflowed = 1;
1814 DMERR("Snapshot overflowed: Unable to allocate exception.");
1815 } else
1816 __invalidate_snapshot(s, -ENOMEM);
Mikulas Patocka29138082009-04-02 19:55:25 +01001817 r = -EIO;
1818 goto out_unlock;
1819 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001820 }
1821
Milan Brozd74f81f2008-02-08 02:11:27 +00001822 remap_exception(s, &pe->e, bio, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001823
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001824 r = DM_MAPIO_SUBMITTED;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001825
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001826 if (!pe->started &&
Kent Overstreet4f024f32013-10-11 15:44:27 -07001827 bio->bi_iter.bi_size ==
1828 (s->store->chunk_size << SECTOR_SHIFT)) {
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001829 pe->started = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001830 mutex_unlock(&s->lock);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001831 start_full_bio(pe, bio);
1832 goto out;
1833 }
1834
1835 bio_list_add(&pe->snapshot_bios, bio);
1836
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001837 if (!pe->started) {
1838 /* this is protected by snap->lock */
1839 pe->started = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001840 mutex_unlock(&s->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001841 start_copy(pe);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001842 goto out;
1843 }
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001844 } else {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001845 bio->bi_bdev = s->origin->bdev;
Mikulas Patockaee180262012-12-21 20:23:41 +00001846 track_chunk(s, bio, chunk);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001849out_unlock:
Mikulas Patocka0685a252017-11-23 16:15:43 -05001850 mutex_unlock(&s->lock);
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001851out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 return r;
1853}
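
/*
 * To summarize snapshot_map(): reads of unexceptioned chunks are
 * remapped to the origin (and tracked so conflicting copies can be
 * fenced), I/O to already-copied chunks is remapped to the COW device,
 * writes needing a copy return DM_MAPIO_SUBMITTED and are resubmitted
 * from pending_complete(), and an invalid or overflowed snapshot fails
 * the I/O with -EIO.
 */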
1854
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001855/*
1856 * A snapshot-merge target behaves like a combination of a snapshot
1857 * target and a snapshot-origin target. It only generates new
1858 * exceptions in other snapshots and not in the one that is being
1859 * merged.
1860 *
1861 * For each chunk, if there is an existing exception, it is used to
1862 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1863 * which in turn might generate exceptions in other snapshots.
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001864 * If merging is currently taking place on the chunk in question, the
1865 * I/O is deferred by adding it to s->bios_queued_during_merge.
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001866 */
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001867static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001868{
1869 struct dm_exception *e;
1870 struct dm_snapshot *s = ti->private;
1871 int r = DM_MAPIO_REMAPPED;
1872 chunk_t chunk;
1873
Mikulas Patockaee180262012-12-21 20:23:41 +00001874 init_tracked_chunk(bio);
1875
Jens Axboe1eff9d32016-08-05 15:35:16 -06001876 if (bio->bi_opf & REQ_PREFLUSH) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001877 if (!dm_bio_get_target_bio_nr(bio))
Mike Snitzer10b81062009-12-10 23:52:31 +00001878 bio->bi_bdev = s->origin->bdev;
1879 else
1880 bio->bi_bdev = s->cow->bdev;
Mike Snitzer10b81062009-12-10 23:52:31 +00001881 return DM_MAPIO_REMAPPED;
1882 }
1883
Kent Overstreet4f024f32013-10-11 15:44:27 -07001884 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001885
Mikulas Patocka0685a252017-11-23 16:15:43 -05001886 mutex_lock(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001887
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001888 /* Full merging snapshots are redirected to the origin */
1889 if (!s->valid)
1890 goto redirect_to_origin;
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001891
1892 /* If the block is already remapped - use that */
1893 e = dm_lookup_exception(&s->complete, chunk);
1894 if (e) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001895 /* Queue writes overlapping with chunks being merged */
Christoph Hellwig70246282016-07-19 11:28:41 +02001896 if (bio_data_dir(bio) == WRITE &&
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001897 chunk >= s->first_merging_chunk &&
1898 chunk < (s->first_merging_chunk +
1899 s->num_merging_chunks)) {
1900 bio->bi_bdev = s->origin->bdev;
1901 bio_list_add(&s->bios_queued_during_merge, bio);
1902 r = DM_MAPIO_SUBMITTED;
1903 goto out_unlock;
1904 }
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001905
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001906 remap_exception(s, e, bio, chunk);
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001907
Christoph Hellwig70246282016-07-19 11:28:41 +02001908 if (bio_data_dir(bio) == WRITE)
Mikulas Patockaee180262012-12-21 20:23:41 +00001909 track_chunk(s, bio, chunk);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001910 goto out_unlock;
1911 }
1912
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001913redirect_to_origin:
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001914 bio->bi_bdev = s->origin->bdev;
1915
Christoph Hellwig70246282016-07-19 11:28:41 +02001916 if (bio_data_dir(bio) == WRITE) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001917 mutex_unlock(&s->lock);
Mikulas Patocka37524332019-10-02 06:15:53 -04001918 return do_origin(s->origin, bio, false);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001919 }
1920
1921out_unlock:
Mikulas Patocka0685a252017-11-23 16:15:43 -05001922 mutex_unlock(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001923
1924 return r;
1925}
1926
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001927static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001928{
1929 struct dm_snapshot *s = ti->private;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001930
Mikulas Patockaee180262012-12-21 20:23:41 +00001931 if (is_bio_tracked(bio))
1932 stop_tracking_chunk(s, bio);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001933
1934 return 0;
1935}
1936
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001937static void snapshot_merge_presuspend(struct dm_target *ti)
1938{
1939 struct dm_snapshot *s = ti->private;
1940
1941 stop_merge(s);
1942}
1943
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001944static int snapshot_preresume(struct dm_target *ti)
1945{
1946 int r = 0;
1947 struct dm_snapshot *s = ti->private;
1948 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1949
1950 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001951 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001952 if (snap_src && snap_dest) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001953 mutex_lock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001954 if (s == snap_src) {
1955 DMERR("Unable to resume snapshot source until "
1956 "handover completes.");
1957 r = -EINVAL;
Mike Snitzerb83b2f22011-01-13 19:59:59 +00001958 } else if (!dm_suspended(snap_src->ti)) {
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001959 DMERR("Unable to perform snapshot handover until "
1960 "source is suspended.");
1961 r = -EINVAL;
1962 }
Mikulas Patocka0685a252017-11-23 16:15:43 -05001963 mutex_unlock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001964 }
1965 up_read(&_origins_lock);
1966
1967 return r;
1968}
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970static void snapshot_resume(struct dm_target *ti)
1971{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001972 struct dm_snapshot *s = ti->private;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001973 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
Mikulas Patockab735fed2015-02-26 11:40:35 -05001974 struct dm_origin *o;
1975 struct mapped_device *origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001976 bool must_restart_merging = false;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001977
1978 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05001979
1980 o = __lookup_dm_origin(s->origin->bdev);
1981 if (o)
1982 origin_md = dm_table_get_md(o->ti->table);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001983 if (!origin_md) {
1984 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1985 if (snap_merging)
1986 origin_md = dm_table_get_md(snap_merging->ti->table);
1987 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001988 if (origin_md == dm_table_get_md(ti->table))
1989 origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001990 if (origin_md) {
1991 if (dm_hold(origin_md))
1992 origin_md = NULL;
1993 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001994
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001995 up_read(&_origins_lock);
1996
1997 if (origin_md) {
Mikulas Patockab735fed2015-02-26 11:40:35 -05001998 dm_internal_suspend_fast(origin_md);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001999 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
2000 must_restart_merging = true;
2001 stop_merge(snap_merging);
2002 }
2003 }
2004
2005 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05002006
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00002007 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002008 if (snap_src && snap_dest) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05002009 mutex_lock(&snap_src->lock);
2010 mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002011 __handover_exceptions(snap_src, snap_dest);
Mikulas Patocka0685a252017-11-23 16:15:43 -05002012 mutex_unlock(&snap_dest->lock);
2013 mutex_unlock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002014 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05002015
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002016 up_read(&_origins_lock);
2017
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05002018 if (origin_md) {
2019 if (must_restart_merging)
2020 start_merge(snap_merging);
2021 dm_internal_resume_fast(origin_md);
2022 dm_put(origin_md);
2023 }
2024
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002025 /* Now we have correct chunk size, reregister */
2026 reregister_snapshot(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Mikulas Patocka0685a252017-11-23 16:15:43 -05002028 mutex_lock(&s->lock);
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08002029 s->active = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05002030 mutex_unlock(&s->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031}
2032
Mike Snitzer542f9032012-07-27 15:08:00 +01002033static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002034{
Mike Snitzer542f9032012-07-27 15:08:00 +01002035 uint32_t min_chunksize;
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002036
2037 down_read(&_origins_lock);
2038 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
2039 up_read(&_origins_lock);
2040
2041 return min_chunksize;
2042}
2043
2044static void snapshot_merge_resume(struct dm_target *ti)
2045{
2046 struct dm_snapshot *s = ti->private;
2047
2048 /*
2049 * Handover exceptions from existing snapshot.
2050 */
2051 snapshot_resume(ti);
2052
2053 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01002054 * snapshot-merge acts as an origin, so set ti->max_io_len
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002055 */
Mike Snitzer542f9032012-07-27 15:08:00 +01002056 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002057
2058 start_merge(s);
2059}
2060
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002061static void snapshot_status(struct dm_target *ti, status_type_t type,
2062 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063{
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002064 unsigned sz = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002065 struct dm_snapshot *snap = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
2067 switch (type) {
2068 case STATUSTYPE_INFO:
Mikulas Patocka94e765722009-12-10 23:51:53 +00002069
Mikulas Patocka0685a252017-11-23 16:15:43 -05002070 mutex_lock(&snap->lock);
Mikulas Patocka94e765722009-12-10 23:51:53 +00002071
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 if (!snap->valid)
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002073 DMEMIT("Invalid");
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00002074 else if (snap->merge_failed)
2075 DMEMIT("Merge failed");
Mikulas Patocka76c44f62015-06-21 16:31:33 -04002076 else if (snap->snapshot_overflowed)
2077 DMEMIT("Overflow");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 else {
Mike Snitzer985903b2009-12-10 23:52:11 +00002079 if (snap->store->type->usage) {
2080 sector_t total_sectors, sectors_allocated,
2081 metadata_sectors;
2082 snap->store->type->usage(snap->store,
2083 &total_sectors,
2084 &sectors_allocated,
2085 &metadata_sectors);
2086 DMEMIT("%llu/%llu %llu",
2087 (unsigned long long)sectors_allocated,
2088 (unsigned long long)total_sectors,
2089 (unsigned long long)metadata_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 }
2091 else
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002092 DMEMIT("Unknown");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 }
Mikulas Patocka94e765722009-12-10 23:51:53 +00002094
Mikulas Patocka0685a252017-11-23 16:15:43 -05002095 mutex_unlock(&snap->lock);
Mikulas Patocka94e765722009-12-10 23:51:53 +00002096
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 break;
2098
2099 case STATUSTYPE_TABLE:
2100 /*
2101 * Emit the table parameters (origin and COW device
2102 * names), then let the exception store append its
2103 * own arguments.
2104 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00002105 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
Jonathan Brassow1e302a92009-04-02 19:55:35 +01002106 snap->store->type->status(snap->store, type, result + sz,
2107 maxlen - sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 break;
2109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110}
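
/*
 * The STATUSTYPE_INFO text above is what "dmsetup status" prints for a
 * snapshot: one of the error states, or "Unknown", or for example
 * "348160/2097152 1544", i.e. sectors allocated / total COW sectors,
 * then metadata sectors consumed (the numbers here are purely
 * illustrative).
 */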
2111
Mike Snitzer8811f462009-09-04 20:40:19 +01002112static int snapshot_iterate_devices(struct dm_target *ti,
2113 iterate_devices_callout_fn fn, void *data)
2114{
2115 struct dm_snapshot *snap = ti->private;
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002116 int r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002117
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002118 r = fn(ti, snap->origin, 0, ti->len, data);
2119
2120 if (!r)
2121 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2122
2123 return r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002124}
2125
2126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127/*-----------------------------------------------------------------
2128 * Origin methods
2129 *---------------------------------------------------------------*/
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002130
2131/*
2132 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 2133 * supplied bio is ignored. The caller may submit it immediately.
2134 * (No remapping actually occurs as the origin is always a direct linear
2135 * map.)
2136 *
2137 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2138 * and any supplied bio is added to a list to be submitted once all
2139 * the necessary exceptions exist.
2140 */
2141static int __origin_write(struct list_head *snapshots, sector_t sector,
2142 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
Mikulas Patocka515ad662009-12-10 23:52:30 +00002144 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 struct dm_snapshot *snap;
Jon Brassow1d4989c2009-12-10 23:52:10 +00002146 struct dm_exception *e;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002147 struct dm_snap_pending_exception *pe;
2148 struct dm_snap_pending_exception *pe_to_start_now = NULL;
2149 struct dm_snap_pending_exception *pe_to_start_last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 chunk_t chunk;
2151
2152 /* Do all the snapshots on this origin */
2153 list_for_each_entry (snap, snapshots, list) {
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002154 /*
2155 * Don't make new exceptions in a merging snapshot
2156 * because it has effectively been deleted
2157 */
2158 if (dm_target_is_snapshot_merge(snap->ti))
2159 continue;
2160
Mikulas Patocka0685a252017-11-23 16:15:43 -05002161 mutex_lock(&snap->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002162
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08002163 /* Only deal with valid and active snapshots */
2164 if (!snap->valid || !snap->active)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002165 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
Alasdair G Kergond5e404c2005-07-12 15:53:05 -07002167 /* Nothing to do if writing beyond end of snapshot */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002168 if (sector >= dm_table_get_size(snap->ti->table))
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002169 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
2171 /*
2172 * Remember, different snapshots can have
2173 * different chunk sizes.
2174 */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002175 chunk = sector_to_chunk(snap->store, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177 /*
2178 * Check exception table to see if block
2179 * is already remapped in this snapshot
2180 * and trigger an exception if not.
2181 */
Jon Brassow3510cb92009-12-10 23:52:11 +00002182 e = dm_lookup_exception(&snap->complete, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002183 if (e)
2184 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
Mikulas Patocka29138082009-04-02 19:55:25 +01002186 pe = __lookup_pending_exception(snap, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002187 if (!pe) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05002188 mutex_unlock(&snap->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01002189 pe = alloc_pending_exception(snap);
Mikulas Patocka0685a252017-11-23 16:15:43 -05002190 mutex_lock(&snap->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01002191
2192 if (!snap->valid) {
2193 free_pending_exception(pe);
2194 goto next_snapshot;
2195 }
2196
Jon Brassow3510cb92009-12-10 23:52:11 +00002197 e = dm_lookup_exception(&snap->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01002198 if (e) {
2199 free_pending_exception(pe);
2200 goto next_snapshot;
2201 }
2202
Mikulas Patockac6621392009-04-02 19:55:25 +01002203 pe = __find_pending_exception(snap, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01002204 if (!pe) {
2205 __invalidate_snapshot(snap, -ENOMEM);
2206 goto next_snapshot;
2207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 }
2209
Mikulas Patocka515ad662009-12-10 23:52:30 +00002210 r = DM_MAPIO_SUBMITTED;
2211
2212 /*
2213 * If an origin bio was supplied, queue it to wait for the
2214 * completion of this exception, and start this one last,
2215 * at the end of the function.
2216 */
2217 if (bio) {
2218 bio_list_add(&pe->origin_bios, bio);
2219 bio = NULL;
2220
2221 if (!pe->started) {
2222 pe->started = 1;
2223 pe_to_start_last = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002224 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002225 }
2226
2227 if (!pe->started) {
2228 pe->started = 1;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002229 pe_to_start_now = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002230 }
2231
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01002232next_snapshot:
Mikulas Patocka0685a252017-11-23 16:15:43 -05002233 mutex_unlock(&snap->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
Mikulas Patocka515ad662009-12-10 23:52:30 +00002235 if (pe_to_start_now) {
2236 start_copy(pe_to_start_now);
2237 pe_to_start_now = NULL;
2238 }
Alasdair G Kergonb4b610f2006-03-27 01:17:44 -08002239 }
2240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 /*
Mikulas Patocka515ad662009-12-10 23:52:30 +00002242 * Submit the exception against which the bio is queued last,
2243 * to give the other exceptions a head start.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 */
Mikulas Patocka515ad662009-12-10 23:52:30 +00002245 if (pe_to_start_last)
2246 start_copy(pe_to_start_last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 return r;
2249}
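
/*
 * An origin bio is queued against the first snapshot that needs a new
 * exception and, once that exception completes, is pushed back through
 * do_origin() by retry_origin_bios(), which re-runs this scan for any
 * remaining snapshots.  Starting that exception's copy last
 * (pe_to_start_last) is what gives the other snapshots' copies their
 * head start.
 */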
2250
2251/*
2252 * Called on a write from the origin driver.
2253 */
Mikulas Patocka37524332019-10-02 06:15:53 -04002254static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255{
2256 struct origin *o;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002257 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Mikulas Patocka37524332019-10-02 06:15:53 -04002259again:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 down_read(&_origins_lock);
2261 o = __lookup_origin(origin->bdev);
Mikulas Patocka37524332019-10-02 06:15:53 -04002262 if (o) {
2263 if (limit) {
2264 struct dm_snapshot *s;
2265 list_for_each_entry(s, &o->snapshots, list)
2266 if (unlikely(!wait_for_in_progress(s, true)))
2267 goto again;
2268 }
2269
Kent Overstreet4f024f32013-10-11 15:44:27 -07002270 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
Mikulas Patocka37524332019-10-02 06:15:53 -04002271 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 up_read(&_origins_lock);
2273
2274 return r;
2275}
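
/*
 * The "goto again" path re-runs the origin lookup from scratch because
 * wait_for_in_progress() drops _origins_lock (unlock_origins == true)
 * before sleeping, and the origin's snapshot list may have changed by
 * the time we wake.
 */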
2276
2277/*
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002278 * Trigger exceptions in all non-merging snapshots.
2279 *
2280 * The chunk size of the merging snapshot may be larger than the chunk
2281 * size of some other snapshot so we may need to reallocate multiple
2282 * chunks in other snapshots.
2283 *
2284 * We scan all the overlapping exceptions in the other snapshots.
2285 * Returns 1 if anything was reallocated and must be waited for,
2286 * otherwise returns 0.
2287 *
2288 * size must be a multiple of merging_snap's chunk_size.
2289 */
2290static int origin_write_extent(struct dm_snapshot *merging_snap,
2291 sector_t sector, unsigned size)
2292{
2293 int must_wait = 0;
2294 sector_t n;
2295 struct origin *o;
2296
2297 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01002298 * The origin's __minimum_chunk_size() got stored in max_io_len
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002299 * by snapshot_merge_resume().
2300 */
2301 down_read(&_origins_lock);
2302 o = __lookup_origin(merging_snap->origin->bdev);
Mike Snitzer542f9032012-07-27 15:08:00 +01002303 for (n = 0; n < size; n += merging_snap->ti->max_io_len)
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002304 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2305 DM_MAPIO_SUBMITTED)
2306 must_wait = 1;
2307 up_read(&_origins_lock);
2308
2309 return must_wait;
2310}
2311
2312/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 * Origin: maps a linear range of a device, with hooks for snapshotting.
2314 */
2315
2316/*
2317 * Construct an origin mapping: <dev_path>
2318 * The context for an origin is merely a 'struct dm_dev *'
2319 * pointing to the real device.
2320 */
2321static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2322{
2323 int r;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002324 struct dm_origin *o;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
2326 if (argc != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002327 ti->error = "origin: incorrect number of arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 return -EINVAL;
2329 }
2330
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002331 o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
2332 if (!o) {
2333 ti->error = "Cannot allocate private origin structure";
2334 r = -ENOMEM;
2335 goto bad_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 }
2337
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002338 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
2339 if (r) {
2340 ti->error = "Cannot get target device";
2341 goto bad_open;
2342 }
2343
Mikulas Patockab735fed2015-02-26 11:40:35 -05002344 o->ti = ti;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002345 ti->private = o;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002346 ti->num_flush_bios = 1;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 return 0;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002349
2350bad_open:
2351 kfree(o);
2352bad_alloc:
2353 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354}
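
/*
 * A typical table line for this target (see
 * Documentation/device-mapper/snapshot.txt) is simply
 *
 *	<start> <length> snapshot-origin <origin device>
 *
 * for example "0 2097152 snapshot-origin /dev/vg/base" (the device
 * path here is illustrative).
 */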
2355
2356static void origin_dtr(struct dm_target *ti)
2357{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002358 struct dm_origin *o = ti->private;
Mikulas Patockab735fed2015-02-26 11:40:35 -05002359
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002360 dm_put_device(ti, o->dev);
2361 kfree(o);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362}
2363
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002364static int origin_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002366 struct dm_origin *o = ti->private;
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002367 unsigned available_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002369 bio->bi_bdev = o->dev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370
Jens Axboe1eff9d32016-08-05 15:35:16 -06002371 if (unlikely(bio->bi_opf & REQ_PREFLUSH))
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002372 return DM_MAPIO_REMAPPED;
2373
Christoph Hellwig70246282016-07-19 11:28:41 +02002374 if (bio_data_dir(bio) != WRITE)
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002375 return DM_MAPIO_REMAPPED;
2376
2377 available_sectors = o->split_boundary -
2378 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2379
2380 if (bio_sectors(bio) > available_sectors)
2381 dm_accept_partial_bio(bio, available_sectors);
2382
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 /* Only tell snapshots if this is a write */
Mikulas Patocka37524332019-10-02 06:15:53 -04002384 return do_origin(o->dev, bio, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385}
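
/*
 * dm_accept_partial_bio() trims writes at o->split_boundary, the
 * smallest chunk size across this origin's snapshots (cached by
 * origin_resume() below).  Since chunk sizes are powers of two, each
 * trimmed write then lies within a single chunk of every snapshot.
 */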
2386
Toshi Kanif6e629b2016-06-28 13:37:16 -06002387static long origin_direct_access(struct dm_target *ti, sector_t sector,
Linus Torvaldsf0c98eb2016-07-28 17:22:07 -07002388 void **kaddr, pfn_t *pfn, long size)
Toshi Kanif6e629b2016-06-28 13:37:16 -06002389{
2390 DMWARN("device does not support dax.");
2391 return -EIO;
2392}
2393
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394/*
Mike Snitzer542f9032012-07-27 15:08:00 +01002395 * Set the target "max_io_len" field to the minimum of all the snapshots'
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 * chunk sizes.
2397 */
2398static void origin_resume(struct dm_target *ti)
2399{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002400 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002402 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
Mikulas Patockab735fed2015-02-26 11:40:35 -05002403
2404 down_write(&_origins_lock);
2405 __insert_dm_origin(o);
2406 up_write(&_origins_lock);
2407}
2408
2409static void origin_postsuspend(struct dm_target *ti)
2410{
2411 struct dm_origin *o = ti->private;
2412
2413 down_write(&_origins_lock);
2414 __remove_dm_origin(o);
2415 up_write(&_origins_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416}
2417
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002418static void origin_status(struct dm_target *ti, status_type_t type,
2419 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002421 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
2423 switch (type) {
2424 case STATUSTYPE_INFO:
2425 result[0] = '\0';
2426 break;
2427
2428 case STATUSTYPE_TABLE:
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002429 snprintf(result, maxlen, "%s", o->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 break;
2431 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432}
2433
Mike Snitzer8811f462009-09-04 20:40:19 +01002434static int origin_iterate_devices(struct dm_target *ti,
2435 iterate_devices_callout_fn fn, void *data)
2436{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002437 struct dm_origin *o = ti->private;
Mike Snitzer8811f462009-09-04 20:40:19 +01002438
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002439 return fn(ti, o->dev, 0, ti->len, data);
Mike Snitzer8811f462009-09-04 20:40:19 +01002440}
2441
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442static struct target_type origin_target = {
2443 .name = "snapshot-origin",
Mikulas Patockab735fed2015-02-26 11:40:35 -05002444 .version = {1, 9, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 .module = THIS_MODULE,
2446 .ctr = origin_ctr,
2447 .dtr = origin_dtr,
2448 .map = origin_map,
2449 .resume = origin_resume,
Mikulas Patockab735fed2015-02-26 11:40:35 -05002450 .postsuspend = origin_postsuspend,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 .status = origin_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002452 .iterate_devices = origin_iterate_devices,
Toshi Kanif6e629b2016-06-28 13:37:16 -06002453 .direct_access = origin_direct_access,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454};
2455
2456static struct target_type snapshot_target = {
2457 .name = "snapshot",
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002458 .version = {1, 15, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 .module = THIS_MODULE,
2460 .ctr = snapshot_ctr,
2461 .dtr = snapshot_dtr,
2462 .map = snapshot_map,
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002463 .end_io = snapshot_end_io,
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002464 .preresume = snapshot_preresume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 .resume = snapshot_resume,
2466 .status = snapshot_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002467 .iterate_devices = snapshot_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468};
2469
Mikulas Patockad698aa42009-12-10 23:52:30 +00002470static struct target_type merge_target = {
2471 .name = dm_snapshot_merge_target_name,
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002472 .version = {1, 4, 0},
Mikulas Patockad698aa42009-12-10 23:52:30 +00002473 .module = THIS_MODULE,
2474 .ctr = snapshot_ctr,
2475 .dtr = snapshot_dtr,
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002476 .map = snapshot_merge_map,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002477 .end_io = snapshot_end_io,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002478 .presuspend = snapshot_merge_presuspend,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002479 .preresume = snapshot_preresume,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002480 .resume = snapshot_merge_resume,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002481 .status = snapshot_status,
2482 .iterate_devices = snapshot_iterate_devices,
2483};
2484
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485static int __init dm_snapshot_init(void)
2486{
2487 int r;
2488
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002489 r = dm_exception_store_init();
2490 if (r) {
2491 DMERR("Failed to initialize exception stores");
2492 return r;
2493 }
2494
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 r = dm_register_target(&snapshot_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002496 if (r < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 DMERR("snapshot target register failed %d", r);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002498 goto bad_register_snapshot_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 }
2500
2501 r = dm_register_target(&origin_target);
2502 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002503 DMERR("Origin target register failed %d", r);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002504 goto bad_register_origin_target;
2505 }
2506
2507 r = dm_register_target(&merge_target);
2508 if (r < 0) {
2509 DMERR("Merge target register failed %d", r);
2510 goto bad_register_merge_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 }
2512
2513 r = init_origin_hash();
2514 if (r) {
2515 DMERR("init_origin_hash failed.");
Mikulas Patockad698aa42009-12-10 23:52:30 +00002516 goto bad_origin_hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 }
2518
Jon Brassow1d4989c2009-12-10 23:52:10 +00002519 exception_cache = KMEM_CACHE(dm_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 if (!exception_cache) {
2521 DMERR("Couldn't create exception cache.");
2522 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002523 goto bad_exception_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 }
2525
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002526 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 if (!pending_cache) {
2528 DMERR("Couldn't create pending cache.");
2529 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002530 goto bad_pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 }
2532
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 return 0;
2534
Mikulas Patockad698aa42009-12-10 23:52:30 +00002535bad_pending_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 kmem_cache_destroy(exception_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002537bad_exception_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 exit_origin_hash();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002539bad_origin_hash:
2540 dm_unregister_target(&merge_target);
2541bad_register_merge_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002543bad_register_origin_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 dm_unregister_target(&snapshot_target);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002545bad_register_snapshot_target:
2546 dm_exception_store_exit();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002547
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 return r;
2549}
2550
2551static void __exit dm_snapshot_exit(void)
2552{
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002553 dm_unregister_target(&snapshot_target);
2554 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002555 dm_unregister_target(&merge_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
2557 exit_origin_hash();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 kmem_cache_destroy(pending_cache);
2559 kmem_cache_destroy(exception_cache);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002560
2561 dm_exception_store_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562}
2563
2564/* Module hooks */
2565module_init(dm_snapshot_init);
2566module_exit(dm_snapshot_exit);
2567
2568MODULE_DESCRIPTION(DM_NAME " snapshot target");
2569MODULE_AUTHOR("Joe Thornber");
2570MODULE_LICENSE("GPL");
Mikulas Patocka23cb2102013-03-01 22:45:47 +00002571MODULE_ALIAS("dm-snapshot-origin");
2572MODULE_ALIAS("dm-snapshot-merge");