/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

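/*
 * Target type names are unique static strings, so comparing the name
 * pointer itself is enough to identify the merge target.
 */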
#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

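/*
 * Snapshot reads that have not been remapped are served directly from
 * the origin device.  Each such in-flight read is tracked in a small
 * hash so that code about to overwrite a chunk can wait for conflicting
 * I/O to drain (see __check_for_conflicting_io() below).
 */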
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL      - first new snapshot
 *   1: snap_src, NULL  - normal snapshot
 *   2: snap_src, snap_dest - waiting for handover
 *   2: snap_src, NULL  - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
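	/* Fall back to the emergency reserves if GFP_NOIO fails. */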
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
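	/*
	 * Order the mempool_free() before the decrement: snapshot_dtr()
	 * waits for this count to reach zero before destroying the pool.
	 */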
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int r;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
	if (r <= 0) {
		if (r < 0)
			DMERR("Read error in exception store: "
			      "shutting down merge");
		goto shut;
	}

	/* TODO: use larger I/O size once we verify that kcopyd handles it */

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min((sector_t)s->store->chunk_size,
			 get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

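	/*
	 * Publish the range being merged under the lock: bios that overlap
	 * it are held on bios_queued_during_merge until the merge commits.
	 */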
	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = 1;
	up_write(&s->lock);

	__check_for_conflicting_io(s, old_chunk);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

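/*
 * Action routine for wait_on_bit(): give up the CPU until the
 * RUNNING_MERGE bit clears.
 */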
static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

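	/*
	 * The merge target writes back to the origin, so it needs write
	 * access to it and a second flush request (one each for the
	 * origin and COW devices).
	 */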
	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

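/*
 * Swap the exception table and exception store between two snapshot
 * targets that share a COW device, completing a table-reload handover.
 */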
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers, retrying each through the origin path.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

 out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	free_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

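/*
 * Redirect a bio to the COW device: the base of the (possibly
 * consecutive) new chunk, plus this chunk's offset within the run,
 * preserving the bio's offset within the chunk.
 */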
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001538 up_write(&s->lock);
1539 pe = alloc_pending_exception(s);
1540 down_write(&s->lock);
1541
1542 if (!s->valid) {
1543 free_pending_exception(pe);
1544 r = -EIO;
1545 goto out_unlock;
1546 }
1547
Jon Brassow3510cb92009-12-10 23:52:11 +00001548 e = dm_lookup_exception(&s->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001549 if (e) {
1550 free_pending_exception(pe);
1551 remap_exception(s, e, bio, chunk);
1552 goto out_unlock;
1553 }
1554
Mikulas Patockac6621392009-04-02 19:55:25 +01001555 pe = __find_pending_exception(s, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001556 if (!pe) {
1557 __invalidate_snapshot(s, -ENOMEM);
1558 r = -EIO;
1559 goto out_unlock;
1560 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001561 }
1562
Milan Brozd74f81f2008-02-08 02:11:27 +00001563 remap_exception(s, &pe->e, bio, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001564 bio_list_add(&pe->snapshot_bios, bio);
1565
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001566 r = DM_MAPIO_SUBMITTED;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001567
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001568 if (!pe->started) {
1569 /* this is protected by snap->lock */
1570 pe->started = 1;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001571 up_write(&s->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001572 start_copy(pe);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001573 goto out;
1574 }
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001575 } else {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001576 bio->bi_bdev = s->origin->bdev;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001577 map_context->ptr = track_chunk(s, chunk);
1578 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001580 out_unlock:
1581 up_write(&s->lock);
1582 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return r;
1584}
1585
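/*
 * Illustrative userspace sketch (not part of this driver): the shape of
 * snapshot_map()'s write path above.  alloc_pending_exception() can
 * sleep, so the lock is dropped around it and every earlier conclusion
 * (validity, the complete-table lookup, the pending-table lookup) is
 * re-checked once the lock is retaken.  A pthread analogue with a
 * hypothetical one-slot-per-key table:
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 16

static void *table[TABLE_SIZE];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *find_or_create(unsigned int key)
{
	void *item, *fresh;

	key %= TABLE_SIZE;
	pthread_mutex_lock(&lock);
	item = table[key];
	if (!item) {
		pthread_mutex_unlock(&lock);
		fresh = malloc(64);	/* blocking work, done unlocked */
		pthread_mutex_lock(&lock);

		/* Re-check: another thread may have won the race. */
		item = table[key];
		if (item)
			free(fresh);	/* lost the race; discard ours */
		else
			item = table[key] = fresh;
	}
	pthread_mutex_unlock(&lock);
	return item;
}

int main(void)
{
	/* Both calls return the same item; the loser of any race frees
	 * its surplus allocation, as free_pending_exception() does above. */
	printf("%p %p\n", find_or_create(3), find_or_create(3));
	return 0;
}
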
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001586/*
1587 * A snapshot-merge target behaves like a combination of a snapshot
1588 * target and a snapshot-origin target. It only generates new
1589 * exceptions in other snapshots and not in the one that is being
1590 * merged.
1591 *
1592 * For each chunk, if there is an existing exception, it is used to
1593 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1594 * which in turn might generate exceptions in other snapshots.
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001595 * If merging is currently taking place on the chunk in question, the
1596 * I/O is deferred by adding it to s->bios_queued_during_merge.
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001597 */
1598static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1599 union map_info *map_context)
1600{
1601 struct dm_exception *e;
1602 struct dm_snapshot *s = ti->private;
1603 int r = DM_MAPIO_REMAPPED;
1604 chunk_t chunk;
1605
Mike Snitzer10b81062009-12-10 23:52:31 +00001606 if (unlikely(bio_empty_barrier(bio))) {
1607 if (!map_context->flush_request)
1608 bio->bi_bdev = s->origin->bdev;
1609 else
1610 bio->bi_bdev = s->cow->bdev;
1611 map_context->ptr = NULL;
1612 return DM_MAPIO_REMAPPED;
1613 }
1614
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001615 chunk = sector_to_chunk(s->store, bio->bi_sector);
1616
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001617 down_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001618
1619 /* Full snapshots are not usable */
1620 if (!s->valid) {
1621 r = -EIO;
1622 goto out_unlock;
1623 }
1624
1625 /* If the block is already remapped - use that */
1626 e = dm_lookup_exception(&s->complete, chunk);
1627 if (e) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001628 /* Queue writes overlapping with chunks being merged */
1629 if (bio_rw(bio) == WRITE &&
1630 chunk >= s->first_merging_chunk &&
1631 chunk < (s->first_merging_chunk +
1632 s->num_merging_chunks)) {
1633 bio->bi_bdev = s->origin->bdev;
1634 bio_list_add(&s->bios_queued_during_merge, bio);
1635 r = DM_MAPIO_SUBMITTED;
1636 goto out_unlock;
1637 }
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001638
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001639 remap_exception(s, e, bio, chunk);
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001640
1641 if (bio_rw(bio) == WRITE)
1642 map_context->ptr = track_chunk(s, chunk);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001643 goto out_unlock;
1644 }
1645
1646 bio->bi_bdev = s->origin->bdev;
1647
1648 if (bio_rw(bio) == WRITE) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001649 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001650 return do_origin(s->origin, bio);
1651 }
1652
1653out_unlock:
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001654 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001655
1656 return r;
1657}
1658
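/*
 * Illustrative userspace sketch (not part of this driver): the
 * merge-window test used by snapshot_merge_map() above.  A write is
 * deferred if and only if its chunk lies in the half-open window
 * [first, first + num) currently being copied back to the origin.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long chunk_t;

static bool in_merge_window(chunk_t chunk, chunk_t first, unsigned int num)
{
	return chunk >= first && chunk < first + num;
}

int main(void)
{
	/* Window covering chunks 8..11 (first = 8, num = 4). */
	printf("%d %d %d\n",
	       in_merge_window(7, 8, 4),	/* 0: just before the window */
	       in_merge_window(8, 8, 4),	/* 1: first merging chunk */
	       in_merge_window(12, 8, 4));	/* 0: just past the end */
	return 0;
}
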
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001659static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1660 int error, union map_info *map_context)
1661{
1662 struct dm_snapshot *s = ti->private;
1663 struct dm_snap_tracked_chunk *c = map_context->ptr;
1664
1665 if (c)
1666 stop_tracking_chunk(s, c);
1667
1668 return 0;
1669}
1670
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001671static void snapshot_merge_presuspend(struct dm_target *ti)
1672{
1673 struct dm_snapshot *s = ti->private;
1674
1675 stop_merge(s);
1676}
1677
Mike Snitzerc26655c2009-12-10 23:52:12 +00001678static void snapshot_postsuspend(struct dm_target *ti)
1679{
1680 struct dm_snapshot *s = ti->private;
1681
1682 down_write(&s->lock);
1683 s->suspended = 1;
1684 up_write(&s->lock);
1685}
1686
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001687static int snapshot_preresume(struct dm_target *ti)
1688{
1689 int r = 0;
1690 struct dm_snapshot *s = ti->private;
1691 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1692
1693 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001694 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001695 if (snap_src && snap_dest) {
1696 down_read(&snap_src->lock);
1697 if (s == snap_src) {
1698 DMERR("Unable to resume snapshot source until "
1699 "handover completes.");
1700 r = -EINVAL;
1701 } else if (!snap_src->suspended) {
1702 DMERR("Unable to perform snapshot handover until "
1703 "source is suspended.");
1704 r = -EINVAL;
1705 }
1706 up_read(&snap_src->lock);
1707 }
1708 up_read(&_origins_lock);
1709
1710 return r;
1711}
1712
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713static void snapshot_resume(struct dm_target *ti)
1714{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001715 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001716 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1717
1718 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001719 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001720 if (snap_src && snap_dest) {
1721 down_write(&snap_src->lock);
1722 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1723 __handover_exceptions(snap_src, snap_dest);
1724 up_write(&snap_dest->lock);
1725 up_write(&snap_src->lock);
1726 }
1727 up_read(&_origins_lock);
1728
 1729 /* Now that we have the correct chunk size, reregister */
1730 reregister_snapshot(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001732 down_write(&s->lock);
1733 s->active = 1;
Mike Snitzerc26655c2009-12-10 23:52:12 +00001734 s->suspended = 0;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001735 up_write(&s->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736}
1737
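/*
 * Illustrative userspace sketch (not part of this driver):
 * snapshot_resume() above takes two locks of the same class, telling
 * lockdep via down_write_nested(..., SINGLE_DEPTH_NESTING) that the
 * nesting is intentional; deadlock is avoided because the src/dest
 * roles, and hence the acquisition order, are fixed for all callers.
 * A generic userspace analogue of that discipline orders same-class
 * locks by address.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct snap {
	pthread_mutex_t lock;
	int exceptions;
};

static void handover(struct snap *src, struct snap *dest)
{
	/* Fixed global order so two racing handovers cannot deadlock. */
	struct snap *first = (uintptr_t)src < (uintptr_t)dest ? src : dest;
	struct snap *second = first == src ? dest : src;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
	dest->exceptions = src->exceptions;	/* the handover itself */
	src->exceptions = 0;
	pthread_mutex_unlock(&second->lock);
	pthread_mutex_unlock(&first->lock);
}

int main(void)
{
	struct snap a = { .lock = PTHREAD_MUTEX_INITIALIZER, .exceptions = 5 };
	struct snap b = { .lock = PTHREAD_MUTEX_INITIALIZER, .exceptions = 0 };

	handover(&a, &b);
	printf("%d %d\n", a.exceptions, b.exceptions);	/* prints "0 5" */
	return 0;
}
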
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001738static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1739{
1740 sector_t min_chunksize;
1741
1742 down_read(&_origins_lock);
1743 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1744 up_read(&_origins_lock);
1745
1746 return min_chunksize;
1747}
1748
1749static void snapshot_merge_resume(struct dm_target *ti)
1750{
1751 struct dm_snapshot *s = ti->private;
1752
1753 /*
1754 * Handover exceptions from existing snapshot.
1755 */
1756 snapshot_resume(ti);
1757
1758 /*
1759 * snapshot-merge acts as an origin, so set ti->split_io
1760 */
1761 ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1762
1763 start_merge(s);
1764}
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766static int snapshot_status(struct dm_target *ti, status_type_t type,
1767 char *result, unsigned int maxlen)
1768{
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001769 unsigned sz = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001770 struct dm_snapshot *snap = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
1772 switch (type) {
1773 case STATUSTYPE_INFO:
Mikulas Patocka94e765722009-12-10 23:51:53 +00001774
1775 down_write(&snap->lock);
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 if (!snap->valid)
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001778 DMEMIT("Invalid");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 else {
Mike Snitzer985903b2009-12-10 23:52:11 +00001780 if (snap->store->type->usage) {
1781 sector_t total_sectors, sectors_allocated,
1782 metadata_sectors;
1783 snap->store->type->usage(snap->store,
1784 &total_sectors,
1785 &sectors_allocated,
1786 &metadata_sectors);
1787 DMEMIT("%llu/%llu %llu",
1788 (unsigned long long)sectors_allocated,
1789 (unsigned long long)total_sectors,
1790 (unsigned long long)metadata_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 } else
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001793 DMEMIT("Unknown");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
Mikulas Patocka94e765722009-12-10 23:51:53 +00001795
1796 up_write(&snap->lock);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 break;
1799
1800 case STATUSTYPE_TABLE:
 1801 /*
 1802 * Emit the table line: the origin and COW device names,
 1803 * followed by the exception store's own arguments.
 1804 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001806 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
Jonathan Brassow1e302a92009-04-02 19:55:35 +01001807 snap->store->type->status(snap->store, type, result + sz,
1808 maxlen - sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 break;
1810 }
1811
1812 return 0;
1813}
1814
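/*
 * Illustrative userspace sketch (not part of this driver): the
 * STATUSTYPE_INFO line built above is "<allocated>/<total> <metadata>",
 * all in 512-byte sectors, as surfaced by a tool such as
 * 'dmsetup status'.  The numbers below are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	char result[64];
	unsigned long long sectors_allocated = 2048;
	unsigned long long total_sectors = 40960;
	unsigned long long metadata_sectors = 40;

	snprintf(result, sizeof(result), "%llu/%llu %llu",
		 sectors_allocated, total_sectors, metadata_sectors);
	puts(result);		/* prints "2048/40960 40" */
	return 0;
}
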
Mike Snitzer8811f462009-09-04 20:40:19 +01001815static int snapshot_iterate_devices(struct dm_target *ti,
1816 iterate_devices_callout_fn fn, void *data)
1817{
1818 struct dm_snapshot *snap = ti->private;
1819
1820 return fn(ti, snap->origin, 0, ti->len, data);
1821}
 1822
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824/*-----------------------------------------------------------------
1825 * Origin methods
1826 *---------------------------------------------------------------*/
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001827
1828/*
1829 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1830 * supplied bio was ignored. The caller may submit it immediately.
1831 * (No remapping actually occurs as the origin is always a direct linear
1832 * map.)
1833 *
1834 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1835 * and any supplied bio is added to a list to be submitted once all
1836 * the necessary exceptions exist.
1837 */
1838static int __origin_write(struct list_head *snapshots, sector_t sector,
1839 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840{
Mikulas Patocka515ad662009-12-10 23:52:30 +00001841 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 struct dm_snapshot *snap;
Jon Brassow1d4989c2009-12-10 23:52:10 +00001843 struct dm_exception *e;
Mikulas Patocka515ad662009-12-10 23:52:30 +00001844 struct dm_snap_pending_exception *pe;
1845 struct dm_snap_pending_exception *pe_to_start_now = NULL;
1846 struct dm_snap_pending_exception *pe_to_start_last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 chunk_t chunk;
1848
1849 /* Do all the snapshots on this origin */
1850 list_for_each_entry (snap, snapshots, list) {
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001851 /*
1852 * Don't make new exceptions in a merging snapshot
1853 * because it has effectively been deleted
1854 */
1855 if (dm_target_is_snapshot_merge(snap->ti))
1856 continue;
1857
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001858 down_write(&snap->lock);
1859
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001860 /* Only deal with valid and active snapshots */
1861 if (!snap->valid || !snap->active)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001862 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Alasdair G Kergond5e404c2005-07-12 15:53:05 -07001864 /* Nothing to do if writing beyond end of snapshot */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001865 if (sector >= dm_table_get_size(snap->ti->table))
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001866 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
1868 /*
1869 * Remember, different snapshots can have
1870 * different chunk sizes.
1871 */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001872 chunk = sector_to_chunk(snap->store, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873
1874 /*
1875 * Check exception table to see if block
1876 * is already remapped in this snapshot
1877 * and trigger an exception if not.
1878 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001879 e = dm_lookup_exception(&snap->complete, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001880 if (e)
1881 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
Mikulas Patocka29138082009-04-02 19:55:25 +01001883 pe = __lookup_pending_exception(snap, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001884 if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001885 up_write(&snap->lock);
1886 pe = alloc_pending_exception(snap);
1887 down_write(&snap->lock);
1888
1889 if (!snap->valid) {
1890 free_pending_exception(pe);
1891 goto next_snapshot;
1892 }
1893
Jon Brassow3510cb92009-12-10 23:52:11 +00001894 e = dm_lookup_exception(&snap->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001895 if (e) {
1896 free_pending_exception(pe);
1897 goto next_snapshot;
1898 }
1899
Mikulas Patockac6621392009-04-02 19:55:25 +01001900 pe = __find_pending_exception(snap, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001901 if (!pe) {
1902 __invalidate_snapshot(snap, -ENOMEM);
1903 goto next_snapshot;
1904 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 }
1906
Mikulas Patocka515ad662009-12-10 23:52:30 +00001907 r = DM_MAPIO_SUBMITTED;
1908
1909 /*
1910 * If an origin bio was supplied, queue it to wait for the
1911 * completion of this exception, and start this one last,
1912 * at the end of the function.
1913 */
1914 if (bio) {
1915 bio_list_add(&pe->origin_bios, bio);
1916 bio = NULL;
1917
1918 if (!pe->started) {
1919 pe->started = 1;
1920 pe_to_start_last = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001921 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001922 }
1923
1924 if (!pe->started) {
1925 pe->started = 1;
Mikulas Patocka515ad662009-12-10 23:52:30 +00001926 pe_to_start_now = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001927 }
1928
1929 next_snapshot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 up_write(&snap->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Mikulas Patocka515ad662009-12-10 23:52:30 +00001932 if (pe_to_start_now) {
1933 start_copy(pe_to_start_now);
1934 pe_to_start_now = NULL;
1935 }
Alasdair G Kergonb4b610f2006-03-27 01:17:44 -08001936 }
1937
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 /*
Mikulas Patocka515ad662009-12-10 23:52:30 +00001939 * Submit the exception against which the bio is queued last,
1940 * to give the other exceptions a head start.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 */
Mikulas Patocka515ad662009-12-10 23:52:30 +00001942 if (pe_to_start_last)
1943 start_copy(pe_to_start_last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
1945 return r;
1946}
1947
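/*
 * Illustrative userspace sketch (not part of this driver): the
 * pe_to_start_now / pe_to_start_last ordering in __origin_write() above.
 * Each new copy is kicked off as soon as its snapshot's lock is
 * released, except the one the caller's bio was queued against, which
 * starts last so the other snapshots get a head start.  A hypothetical
 * flattened version:
 */
#include <stdio.h>

#define NSNAPS 3

static void start_copy_demo(int id)
{
	printf("start copy for snapshot %d\n", id);
}

int main(void)
{
	int holds_our_bio = 1;		/* bio queued on snapshot 1, say */
	int start_last = -1;
	int id;

	for (id = 0; id < NSNAPS; id++) {
		if (id == holds_our_bio)
			start_last = id;	/* defer this one */
		else
			start_copy_demo(id);	/* start immediately */
	}

	if (start_last >= 0)
		start_copy_demo(start_last);	/* starts after its siblings */
	return 0;
}
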
1948/*
1949 * Called on a write from the origin driver.
1950 */
1951static int do_origin(struct dm_dev *origin, struct bio *bio)
1952{
1953 struct origin *o;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001954 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
1956 down_read(&_origins_lock);
1957 o = __lookup_origin(origin->bdev);
1958 if (o)
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001959 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 up_read(&_origins_lock);
1961
1962 return r;
1963}
1964
1965/*
1966 * Origin: maps a linear range of a device, with hooks for snapshotting.
1967 */
1968
1969/*
1970 * Construct an origin mapping: <dev_path>
1971 * The context for an origin is merely a 'struct dm_dev *'
1972 * pointing to the real device.
1973 */
1974static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1975{
1976 int r;
1977 struct dm_dev *dev;
1978
1979 if (argc != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001980 ti->error = "origin: incorrect number of arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 return -EINVAL;
1982 }
1983
1984 r = dm_get_device(ti, argv[0], 0, ti->len,
1985 dm_table_get_mode(ti->table), &dev);
1986 if (r) {
1987 ti->error = "Cannot get target device";
1988 return r;
1989 }
1990
1991 ti->private = dev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001992 ti->num_flush_requests = 1;
1993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 return 0;
1995}
1996
1997static void origin_dtr(struct dm_target *ti)
1998{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001999 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 dm_put_device(ti, dev);
2001}
2002
2003static int origin_map(struct dm_target *ti, struct bio *bio,
2004 union map_info *map_context)
2005{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002006 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 bio->bi_bdev = dev->bdev;
2008
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002009 if (unlikely(bio_empty_barrier(bio)))
2010 return DM_MAPIO_REMAPPED;
2011
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 /* Only tell snapshots if this is a write */
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002013 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014}
2015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016/*
2017 * Set the target "split_io" field to the minimum of all the snapshots'
2018 * chunk sizes.
2019 */
2020static void origin_resume(struct dm_target *ti)
2021{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002022 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002024 ti->split_io = get_origin_minimum_chunksize(dev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025}
2026
2027static int origin_status(struct dm_target *ti, status_type_t type, char *result,
2028 unsigned int maxlen)
2029{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002030 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
2032 switch (type) {
2033 case STATUSTYPE_INFO:
2034 result[0] = '\0';
2035 break;
2036
2037 case STATUSTYPE_TABLE:
2038 snprintf(result, maxlen, "%s", dev->name);
2039 break;
2040 }
2041
2042 return 0;
2043}
2044
Mike Snitzer8811f462009-09-04 20:40:19 +01002045static int origin_iterate_devices(struct dm_target *ti,
2046 iterate_devices_callout_fn fn, void *data)
2047{
2048 struct dm_dev *dev = ti->private;
2049
2050 return fn(ti, dev, 0, ti->len, data);
2051}
2052
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053static struct target_type origin_target = {
2054 .name = "snapshot-origin",
Mike Snitzer8811f462009-09-04 20:40:19 +01002055 .version = {1, 7, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 .module = THIS_MODULE,
2057 .ctr = origin_ctr,
2058 .dtr = origin_dtr,
2059 .map = origin_map,
2060 .resume = origin_resume,
2061 .status = origin_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002062 .iterate_devices = origin_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063};
2064
2065static struct target_type snapshot_target = {
2066 .name = "snapshot",
Mike Snitzerc26655c2009-12-10 23:52:12 +00002067 .version = {1, 9, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 .module = THIS_MODULE,
2069 .ctr = snapshot_ctr,
2070 .dtr = snapshot_dtr,
2071 .map = snapshot_map,
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002072 .end_io = snapshot_end_io,
Mike Snitzerc26655c2009-12-10 23:52:12 +00002073 .postsuspend = snapshot_postsuspend,
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002074 .preresume = snapshot_preresume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 .resume = snapshot_resume,
2076 .status = snapshot_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002077 .iterate_devices = snapshot_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078};
2079
Mikulas Patockad698aa42009-12-10 23:52:30 +00002080static struct target_type merge_target = {
2081 .name = dm_snapshot_merge_target_name,
2082 .version = {1, 0, 0},
2083 .module = THIS_MODULE,
2084 .ctr = snapshot_ctr,
2085 .dtr = snapshot_dtr,
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002086 .map = snapshot_merge_map,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002087 .end_io = snapshot_end_io,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002088 .presuspend = snapshot_merge_presuspend,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002089 .postsuspend = snapshot_postsuspend,
2090 .preresume = snapshot_preresume,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002091 .resume = snapshot_merge_resume,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002092 .status = snapshot_status,
2093 .iterate_devices = snapshot_iterate_devices,
2094};
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096static int __init dm_snapshot_init(void)
2097{
2098 int r;
2099
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002100 r = dm_exception_store_init();
2101 if (r) {
2102 DMERR("Failed to initialize exception stores");
2103 return r;
2104 }
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 r = dm_register_target(&snapshot_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002107 if (r < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 DMERR("snapshot target register failed %d", r);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002109 goto bad_register_snapshot_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 }
2111
2112 r = dm_register_target(&origin_target);
2113 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002114 DMERR("Origin target register failed %d", r);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002115 goto bad_register_origin_target;
2116 }
2117
2118 r = dm_register_target(&merge_target);
2119 if (r < 0) {
2120 DMERR("Merge target register failed %d", r);
2121 goto bad_register_merge_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 }
2123
2124 r = init_origin_hash();
2125 if (r) {
2126 DMERR("init_origin_hash failed.");
Mikulas Patockad698aa42009-12-10 23:52:30 +00002127 goto bad_origin_hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 }
2129
Jon Brassow1d4989c2009-12-10 23:52:10 +00002130 exception_cache = KMEM_CACHE(dm_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 if (!exception_cache) {
2132 DMERR("Couldn't create exception cache.");
2133 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002134 goto bad_exception_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 }
2136
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002137 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 if (!pending_cache) {
2139 DMERR("Couldn't create pending cache.");
2140 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002141 goto bad_pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 }
2143
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002144 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2145 if (!tracked_chunk_cache) {
2146 DMERR("Couldn't create cache to track chunks in use.");
2147 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002148 goto bad_tracked_chunk_cache;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002149 }
2150
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002151 ksnapd = create_singlethread_workqueue("ksnapd");
2152 if (!ksnapd) {
2153 DMERR("Failed to create ksnapd workqueue.");
2154 r = -ENOMEM;
Mikulas Patocka92e86812008-07-21 12:00:35 +01002155 goto bad_pending_pool;
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002156 }
2157
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 return 0;
2159
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002160bad_pending_pool:
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002161 kmem_cache_destroy(tracked_chunk_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002162bad_tracked_chunk_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 kmem_cache_destroy(pending_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002164bad_pending_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 kmem_cache_destroy(exception_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002166bad_exception_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 exit_origin_hash();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002168bad_origin_hash:
2169 dm_unregister_target(&merge_target);
2170bad_register_merge_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002172bad_register_origin_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 dm_unregister_target(&snapshot_target);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002174bad_register_snapshot_target:
2175 dm_exception_store_exit();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002176
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 return r;
2178}
2179
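/*
 * Illustrative userspace sketch (not part of this driver): the
 * goto-unwind idiom used by dm_snapshot_init() above.  Resources are
 * acquired in order; a failure jumps to a label that releases only what
 * was already acquired, in reverse order, so there is a single teardown
 * sequence to keep correct.
 */
#include <stdio.h>
#include <stdlib.h>

static int demo_init(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto bad_a;
	b = malloc(16);
	if (!b)
		goto bad_b;
	c = malloc(16);
	if (!c)
		goto bad_c;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);	/* normal teardown, for the demo only */
	return 0;

bad_c:
	free(b);
bad_b:
	free(a);
bad_a:
	return -1;
}

int main(void)
{
	return demo_init();
}
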
2180static void __exit dm_snapshot_exit(void)
2181{
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002182 destroy_workqueue(ksnapd);
2183
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002184 dm_unregister_target(&snapshot_target);
2185 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002186 dm_unregister_target(&merge_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
2188 exit_origin_hash();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 kmem_cache_destroy(pending_cache);
2190 kmem_cache_destroy(exception_cache);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002191 kmem_cache_destroy(tracked_chunk_cache);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002192
2193 dm_exception_store_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194}
2195
2196/* Module hooks */
2197module_init(dm_snapshot_init);
2198module_exit(dm_snapshot_exit);
2199
2200MODULE_DESCRIPTION(DM_NAME " snapshot target");
2201MODULE_AUTHOR("Joe Thornber");
2202MODULE_LICENSE("GPL");