/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

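/*
 * Record chunks with reads still in flight so that code completing an
 * exception for the same chunk can wait for them to drain
 * (see __check_for_conflicting_io() below).
 */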
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

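/*
 * Pending exceptions are allocated from a per-snapshot mempool;
 * pending_exceptions_count lets snapshot_dtr() wait until they have all
 * been freed before tearing the snapshot down.
 */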
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

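/*
 * Insert an exception into its hash table, extending an existing run of
 * consecutive chunks where possible so that one entry can describe
 * several contiguous mappings.
 */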
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

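/* Mark merging as no longer running and wake up any waiters (stop_merge). */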
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

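/*
 * _pending_exceptions_done_count is advanced every time a pending exception
 * completes; the merge code waits on it while exceptions it triggered in
 * other snapshots are still outstanding.
 */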
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int r;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
	if (r <= 0) {
		if (r < 0)
			DMERR("Read error in exception store: "
			      "shutting down merge");
		goto shut;
	}

	/* TODO: use larger I/O size once we verify that kcopyd handles it */

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min((sector_t)s->store->chunk_size,
			 get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = 1;
	up_write(&s->lock);

	__check_for_conflicting_io(s, old_chunk);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

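/* Start merging unless a merge is already in progress. */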
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

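/* Action routine for wait_on_bit() in stop_merge(): just reschedule. */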
993static int wait_schedule(void *ptr)
994{
995 schedule();
996
997 return 0;
998}
999
1000/*
1001 * Stop the merging process and wait until it finishes.
1002 */
1003static void stop_merge(struct dm_snapshot *s)
1004{
1005 set_bit(SHUTDOWN_MERGE, &s->state_bits);
1006 wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
1007 TASK_UNINTERRUPTIBLE);
1008 clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1009}
1010
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
1013 */
1014static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1015{
1016 struct dm_snapshot *s;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001017 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 int r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001019 char *origin_path, *cow_path;
Mike Snitzer10b81062009-12-10 23:52:31 +00001020 unsigned args_used, num_flush_requests = 1;
1021 fmode_t origin_mode = FMODE_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
Mark McLoughlin4c7e3bf2006-10-03 01:15:25 -07001023 if (argc != 4) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001024 ti->error = "requires exactly 4 arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025 r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001026 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 }
1028
Mike Snitzer10b81062009-12-10 23:52:31 +00001029 if (dm_target_is_snapshot_merge(ti)) {
1030 num_flush_requests = 2;
1031 origin_mode = FMODE_WRITE;
1032 }
1033
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 origin_path = argv[0];
Jonathan Brassowfee19982009-04-02 19:55:34 +01001035 argv++;
1036 argc--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 s = kmalloc(sizeof(*s), GFP_KERNEL);
Jonathan Brassowfee19982009-04-02 19:55:34 +01001039 if (!s) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040 ti->error = "Cannot allocate snapshot context private "
1041 "structure";
1042 r = -ENOMEM;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001043 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 }
1045
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001046 cow_path = argv[0];
1047 argv++;
1048 argc--;
1049
1050 r = dm_get_device(ti, cow_path, 0, 0,
1051 FMODE_READ | FMODE_WRITE, &s->cow);
1052 if (r) {
1053 ti->error = "Cannot get COW device";
1054 goto bad_cow;
1055 }
1056
1057 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1058 if (r) {
1059 ti->error = "Couldn't create exception store";
1060 r = -EINVAL;
1061 goto bad_store;
1062 }
1063
1064 argv += args_used;
1065 argc -= args_used;
1066
Mike Snitzer10b81062009-12-10 23:52:31 +00001067 r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 if (r) {
1069 ti->error = "Cannot get origin device";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001070 goto bad_origin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 }
1072
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001073 s->ti = ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 s->valid = 1;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001075 s->active = 0;
Mike Snitzerc26655c2009-12-10 23:52:12 +00001076 s->suspended = 0;
Mikulas Patocka879129d22008-10-30 13:33:16 +00001077 atomic_set(&s->pending_exceptions_count, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 init_rwsem(&s->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001079 INIT_LIST_HEAD(&s->list);
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001080 spin_lock_init(&s->pe_lock);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001081 s->state_bits = 0;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001082 s->first_merging_chunk = 0;
1083 s->num_merging_chunks = 0;
1084 bio_list_init(&s->bios_queued_during_merge);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
1086 /* Allocate hash table for COW data */
Jonathan Brassowfee19982009-04-02 19:55:34 +01001087 if (init_hash_tables(s)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 ti->error = "Unable to allocate hash table space";
1089 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001090 goto bad_hash_tables;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 }
1092
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001093 r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 if (r) {
1095 ti->error = "Could not create kcopyd client";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001096 goto bad_kcopyd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 }
1098
Mikulas Patocka92e86812008-07-21 12:00:35 +01001099 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1100 if (!s->pending_pool) {
1101 ti->error = "Could not allocate mempool for pending exceptions";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001102 goto bad_pending_pool;
Mikulas Patocka92e86812008-07-21 12:00:35 +01001103 }
1104
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001105 s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
1106 tracked_chunk_cache);
1107 if (!s->tracked_chunk_pool) {
1108 ti->error = "Could not allocate tracked_chunk mempool for "
1109 "tracking reads";
Mikulas Patocka92e86812008-07-21 12:00:35 +01001110 goto bad_tracked_chunk_pool;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001111 }
1112
1113 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1114 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1115
1116 spin_lock_init(&s->tracked_chunk_lock);
1117
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001118 bio_list_init(&s->queued_bios);
1119 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
1120
1121 ti->private = s;
Mike Snitzer10b81062009-12-10 23:52:31 +00001122 ti->num_flush_requests = num_flush_requests;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001123
1124 /* Add snapshot to the list of snapshots for this origin */
1125 /* Exceptions aren't triggered till snapshot_resume() is called */
1126 r = register_snapshot(s);
1127 if (r == -ENOMEM) {
1128 ti->error = "Snapshot origin struct allocation failed";
1129 goto bad_load_and_register;
1130 } else if (r < 0) {
1131 /* invalid handover, register_snapshot has set ti->error */
1132 goto bad_load_and_register;
1133 }
1134
1135 /*
1136 * Metadata must only be loaded into one table at once, so skip this
1137 * if metadata will be handed over during resume.
1138 * Chunk size will be set during the handover - set it to zero to
1139 * ensure it's ignored.
1140 */
1141 if (r > 0) {
1142 s->store->chunk_size = 0;
1143 return 0;
1144 }
1145
Jonathan Brassow493df712009-04-02 19:55:31 +01001146 r = s->store->type->read_metadata(s->store, dm_add_exception,
1147 (void *)s);
Milan Broz07641472007-07-12 17:28:13 +01001148 if (r < 0) {
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001149 ti->error = "Failed to read snapshot metadata";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001150 goto bad_read_metadata;
Milan Broz07641472007-07-12 17:28:13 +01001151 } else if (r > 0) {
1152 s->valid = 0;
1153 DMWARN("Snapshot is marked invalid.");
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001154 }
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001155
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001156 if (!s->store->chunk_size) {
1157 ti->error = "Chunk size not set";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001158 goto bad_read_metadata;
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001159 }
Jonathan Brassowd0216842009-04-02 19:55:32 +01001160 ti->split_io = s->store->chunk_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
1162 return 0;
1163
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001164bad_read_metadata:
1165 unregister_snapshot(s);
1166
Jonathan Brassowfee19982009-04-02 19:55:34 +01001167bad_load_and_register:
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001168 mempool_destroy(s->tracked_chunk_pool);
1169
Jonathan Brassowfee19982009-04-02 19:55:34 +01001170bad_tracked_chunk_pool:
Mikulas Patocka92e86812008-07-21 12:00:35 +01001171 mempool_destroy(s->pending_pool);
1172
Jonathan Brassowfee19982009-04-02 19:55:34 +01001173bad_pending_pool:
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001174 dm_kcopyd_client_destroy(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
Jonathan Brassowfee19982009-04-02 19:55:34 +01001176bad_kcopyd:
Jon Brassow3510cb92009-12-10 23:52:11 +00001177 dm_exception_table_exit(&s->pending, pending_cache);
1178 dm_exception_table_exit(&s->complete, exception_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Jonathan Brassowfee19982009-04-02 19:55:34 +01001180bad_hash_tables:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 dm_put_device(ti, s->origin);
1182
Jonathan Brassowfee19982009-04-02 19:55:34 +01001183bad_origin:
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001184 dm_exception_store_destroy(s->store);
1185
1186bad_store:
1187 dm_put_device(ti, s->cow);
1188
1189bad_cow:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 kfree(s);
1191
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001192bad:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 return r;
1194}
1195
Milan Broz31c93a02006-12-08 02:41:11 -08001196static void __free_exceptions(struct dm_snapshot *s)
1197{
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001198 dm_kcopyd_client_destroy(s->kcopyd_client);
Milan Broz31c93a02006-12-08 02:41:11 -08001199 s->kcopyd_client = NULL;
1200
Jon Brassow3510cb92009-12-10 23:52:11 +00001201 dm_exception_table_exit(&s->pending, pending_cache);
1202 dm_exception_table_exit(&s->complete, exception_cache);
Milan Broz31c93a02006-12-08 02:41:11 -08001203}
1204
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001205static void __handover_exceptions(struct dm_snapshot *snap_src,
1206 struct dm_snapshot *snap_dest)
1207{
1208 union {
1209 struct dm_exception_table table_swap;
1210 struct dm_exception_store *store_swap;
1211 } u;
1212
1213 /*
1214 * Swap all snapshot context information between the two instances.
1215 */
1216 u.table_swap = snap_dest->complete;
1217 snap_dest->complete = snap_src->complete;
1218 snap_src->complete = u.table_swap;
1219
1220 u.store_swap = snap_dest->store;
1221 snap_dest->store = snap_src->store;
1222 snap_src->store = u.store_swap;
1223
1224 snap_dest->store->snap = snap_dest;
1225 snap_src->store->snap = snap_src;
1226
1227 snap_dest->ti->split_io = snap_dest->store->chunk_size;
1228 snap_dest->valid = snap_src->valid;
1229
1230 /*
1231 * Set source invalid to ensure it receives no further I/O.
1232 */
1233 snap_src->valid = 0;
1234}
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236static void snapshot_dtr(struct dm_target *ti)
1237{
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001238#ifdef CONFIG_DM_DEBUG
1239 int i;
1240#endif
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001241 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001242 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001244 flush_workqueue(ksnapd);
1245
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001246 down_read(&_origins_lock);
1247 /* Check whether exception handover must be cancelled */
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001248 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001249 if (snap_src && snap_dest && (s == snap_src)) {
1250 down_write(&snap_dest->lock);
1251 snap_dest->valid = 0;
1252 up_write(&snap_dest->lock);
1253 DMERR("Cancelling snapshot handover.");
1254 }
1255 up_read(&_origins_lock);
1256
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001257 if (dm_target_is_snapshot_merge(ti))
1258 stop_merge(s);
1259
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001260 /* Prevent further origin writes from using this snapshot. */
1261 /* After this returns there can be no new kcopyd jobs. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 unregister_snapshot(s);
1263
Mikulas Patocka879129d22008-10-30 13:33:16 +00001264 while (atomic_read(&s->pending_exceptions_count))
Mikulas Patocka90fa1522009-01-06 03:04:54 +00001265 msleep(1);
Mikulas Patocka879129d22008-10-30 13:33:16 +00001266 /*
1267 * Ensure instructions in mempool_destroy aren't reordered
1268 * before atomic_read.
1269 */
1270 smp_mb();
1271
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001272#ifdef CONFIG_DM_DEBUG
1273 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1274 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1275#endif
1276
1277 mempool_destroy(s->tracked_chunk_pool);
1278
Milan Broz31c93a02006-12-08 02:41:11 -08001279 __free_exceptions(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
Mikulas Patocka92e86812008-07-21 12:00:35 +01001281 mempool_destroy(s->pending_pool);
1282
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 dm_put_device(ti, s->origin);
Jonathan Brassowfee19982009-04-02 19:55:34 +01001284
1285 dm_exception_store_destroy(s->store);
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001286
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001287 dm_put_device(ti, s->cow);
1288
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 kfree(s);
1290}
1291
1292/*
1293 * Flush a list of buffers.
1294 */
1295static void flush_bios(struct bio *bio)
1296{
1297 struct bio *n;
1298
1299 while (bio) {
1300 n = bio->bi_next;
1301 bio->bi_next = NULL;
1302 generic_make_request(bio);
1303 bio = n;
1304 }
1305}
1306
David Howellsc4028952006-11-22 14:57:56 +00001307static void flush_queued_bios(struct work_struct *work)
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001308{
David Howellsc4028952006-11-22 14:57:56 +00001309 struct dm_snapshot *s =
1310 container_of(work, struct dm_snapshot, queued_bios_work);
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001311 struct bio *queued_bios;
1312 unsigned long flags;
1313
1314 spin_lock_irqsave(&s->pe_lock, flags);
1315 queued_bios = bio_list_get(&s->queued_bios);
1316 spin_unlock_irqrestore(&s->pe_lock, flags);
1317
1318 flush_bios(queued_bios);
1319}
1320
Mikulas Patocka515ad662009-12-10 23:52:30 +00001321static int do_origin(struct dm_dev *origin, struct bio *bio);
1322
1323/*
1324 * Flush a list of buffers.
1325 */
1326static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1327{
1328 struct bio *n;
1329 int r;
1330
1331 while (bio) {
1332 n = bio->bi_next;
1333 bio->bi_next = NULL;
1334 r = do_origin(s->origin, bio);
1335 if (r == DM_MAPIO_REMAPPED)
1336 generic_make_request(bio);
1337 bio = n;
1338 }
1339}
1340
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341/*
1342 * Error a list of buffers.
1343 */
1344static void error_bios(struct bio *bio)
1345{
1346 struct bio *n;
1347
1348 while (bio) {
1349 n = bio->bi_next;
1350 bio->bi_next = NULL;
NeilBrown6712ecf2007-09-27 12:47:43 +02001351 bio_io_error(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 bio = n;
1353 }
1354}
1355
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001356static void __invalidate_snapshot(struct dm_snapshot *s, int err)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001357{
1358 if (!s->valid)
1359 return;
1360
1361 if (err == -EIO)
1362 DMERR("Invalidating snapshot: Error reading/writing.");
1363 else if (err == -ENOMEM)
1364 DMERR("Invalidating snapshot: Unable to allocate exception.");
1365
Jonathan Brassow493df712009-04-02 19:55:31 +01001366 if (s->store->type->drop_snapshot)
1367 s->store->type->drop_snapshot(s->store);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001368
1369 s->valid = 0;
1370
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001371 dm_table_event(s->ti->table);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001372}
1373
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001374static void pending_complete(struct dm_snap_pending_exception *pe, int success)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375{
Jon Brassow1d4989c2009-12-10 23:52:10 +00001376 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 struct dm_snapshot *s = pe->snap;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001378 struct bio *origin_bios = NULL;
1379 struct bio *snapshot_bios = NULL;
1380 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001382 if (!success) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 /* Read/write error - snapshot is unusable */
1384 down_write(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001385 __invalidate_snapshot(s, -EIO);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001386 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001387 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 }
1389
Jon Brassow3510cb92009-12-10 23:52:11 +00001390 e = alloc_completed_exception();
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001391 if (!e) {
1392 down_write(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001393 __invalidate_snapshot(s, -ENOMEM);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001394 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001395 goto out;
1396 }
1397 *e = pe->e;
1398
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001399 down_write(&s->lock);
1400 if (!s->valid) {
Jon Brassow3510cb92009-12-10 23:52:11 +00001401 free_completed_exception(e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001402 error = 1;
1403 goto out;
1404 }
1405
Mike Snitzer615d1eb2009-12-10 23:52:29 +00001406 /* Check for conflicting reads */
1407 __check_for_conflicting_io(s, pe->e.old_chunk);
Mikulas Patockaa8d41b52008-07-21 12:00:34 +01001408
1409 /*
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001410 * Add a proper exception, and remove the
1411 * in-flight exception from the list.
1412 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001413 dm_insert_exception(&s->complete, e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001414
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 out:
Jon Brassow3510cb92009-12-10 23:52:11 +00001416 dm_remove_exception(&pe->e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001417 snapshot_bios = bio_list_get(&pe->snapshot_bios);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001418 origin_bios = bio_list_get(&pe->origin_bios);
1419 free_pending_exception(pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001421 increment_pending_exceptions_done_count();
1422
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001423 up_write(&s->lock);
1424
1425 /* Submit any pending write bios */
1426 if (error)
1427 error_bios(snapshot_bios);
1428 else
1429 flush_bios(snapshot_bios);
1430
Mikulas Patocka515ad662009-12-10 23:52:30 +00001431 retry_origin_bios(s, origin_bios);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432}
1433
1434static void commit_callback(void *context, int success)
1435{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001436 struct dm_snap_pending_exception *pe = context;
1437
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 pending_complete(pe, success);
1439}
1440
1441/*
1442 * Called when the copy I/O has finished. kcopyd actually runs
1443 * this code so don't block.
1444 */
Alasdair G Kergon4cdc1d12008-03-28 14:16:10 -07001445static void copy_callback(int read_err, unsigned long write_err, void *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001447 struct dm_snap_pending_exception *pe = context;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 struct dm_snapshot *s = pe->snap;
1449
1450 if (read_err || write_err)
1451 pending_complete(pe, 0);
1452
1453 else
1454 /* Update the metadata if we are persistent */
Jonathan Brassow493df712009-04-02 19:55:31 +01001455 s->store->type->commit_exception(s->store, &pe->e,
1456 commit_callback, pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457}
1458
1459/*
1460 * Dispatches the copy operation to kcopyd.
1461 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001462static void start_copy(struct dm_snap_pending_exception *pe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463{
1464 struct dm_snapshot *s = pe->snap;
Heinz Mauelshagen22a1ceb2008-04-24 21:43:17 +01001465 struct dm_io_region src, dest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 struct block_device *bdev = s->origin->bdev;
1467 sector_t dev_size;
1468
1469 dev_size = get_dev_size(bdev);
1470
1471 src.bdev = bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001472 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
Mikulas Patockadf96eee2009-10-16 23:18:17 +01001473 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001475 dest.bdev = s->cow->bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001476 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 dest.count = src.count;
1478
1479 /* Hand over to kcopyd */
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001480 dm_kcopyd_copy(s->kcopyd_client,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 &src, 1, &dest, 0, copy_callback, pe);
1482}
1483
Mikulas Patocka29138082009-04-02 19:55:25 +01001484static struct dm_snap_pending_exception *
1485__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1486{
Jon Brassow3510cb92009-12-10 23:52:11 +00001487 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001488
1489 if (!e)
1490 return NULL;
1491
1492 return container_of(e, struct dm_snap_pending_exception, e);
1493}
1494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495/*
1496 * Looks to see if this snapshot already has a pending exception
1497 * for this chunk, otherwise it allocates a new one and inserts
1498 * it into the pending table.
1499 *
1500 * NOTE: a write lock must be held on snap->lock before calling
1501 * this.
1502 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001503static struct dm_snap_pending_exception *
Mikulas Patockac6621392009-04-02 19:55:25 +01001504__find_pending_exception(struct dm_snapshot *s,
1505 struct dm_snap_pending_exception *pe, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
Mikulas Patockac6621392009-04-02 19:55:25 +01001507 struct dm_snap_pending_exception *pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001508
Mikulas Patocka29138082009-04-02 19:55:25 +01001509 pe2 = __lookup_pending_exception(s, chunk);
1510 if (pe2) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001511 free_pending_exception(pe);
Mikulas Patocka29138082009-04-02 19:55:25 +01001512 return pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001513 }
1514
1515 pe->e.old_chunk = chunk;
1516 bio_list_init(&pe->origin_bios);
1517 bio_list_init(&pe->snapshot_bios);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001518 pe->started = 0;
1519
Jonathan Brassow493df712009-04-02 19:55:31 +01001520 if (s->store->type->prepare_exception(s->store, &pe->e)) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001521 free_pending_exception(pe);
1522 return NULL;
1523 }
1524
Jon Brassow3510cb92009-12-10 23:52:11 +00001525 dm_insert_exception(&s->pending, &pe->e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 return pe;
1528}
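/*
 * Callers are expected to allocate 'pe' with s->lock dropped (see
 * snapshot_map() and __origin_write() below): they call
 * alloc_pending_exception(), re-take the lock, re-check s->valid and
 * the completed table, and only then hand the allocation to
 * __find_pending_exception(), which either inserts it, or frees it
 * when another thread raced ahead or the store cannot prepare a new
 * chunk.  This keeps sleeping allocations out of the locked region.
 */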
1529
Jon Brassow1d4989c2009-12-10 23:52:10 +00001530static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
Milan Brozd74f81f2008-02-08 02:11:27 +00001531 struct bio *bio, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532{
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001533 bio->bi_bdev = s->cow->bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001534 bio->bi_sector = chunk_to_sector(s->store,
1535 dm_chunk_number(e->new_chunk) +
1536 (chunk - e->old_chunk)) +
1537 (bio->bi_sector &
1538 s->store->chunk_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539}
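/*
 * Worked example, assuming a 16-sector chunk size (chunk_mask == 15):
 * a bio at sector 83 lies in chunk 5 at offset 3.  If chunk 5 has been
 * remapped to new_chunk 9, the bio is redirected to sector
 * 9 * 16 + 3 == 147 on the COW device.  The (chunk - e->old_chunk)
 * term is only non-zero when a single exception describes a run of
 * consecutive chunks.
 */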
1540
1541static int snapshot_map(struct dm_target *ti, struct bio *bio,
1542 union map_info *map_context)
1543{
Jon Brassow1d4989c2009-12-10 23:52:10 +00001544 struct dm_exception *e;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001545 struct dm_snapshot *s = ti->private;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001546 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 chunk_t chunk;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001548 struct dm_snap_pending_exception *pe = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001550 if (unlikely(bio_empty_barrier(bio))) {
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001551 bio->bi_bdev = s->cow->bdev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001552 return DM_MAPIO_REMAPPED;
1553 }
1554
Jonathan Brassow71fab002009-04-02 19:55:33 +01001555 chunk = sector_to_chunk(s->store, bio->bi_sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
1557 /* Full snapshots are not usable */
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001558 /* To get here the table must be live so s->active is always set. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 if (!s->valid)
Alasdair G Kergonf6a80ea2005-07-12 15:53:01 -07001560 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001562 /* FIXME: should only take write lock if we need
1563 * to copy an exception */
1564 down_write(&s->lock);
1565
1566 if (!s->valid) {
1567 r = -EIO;
1568 goto out_unlock;
1569 }
1570
1571 /* If the block is already remapped - use that, else remap it */
Jon Brassow3510cb92009-12-10 23:52:11 +00001572 e = dm_lookup_exception(&s->complete, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001573 if (e) {
Milan Brozd74f81f2008-02-08 02:11:27 +00001574 remap_exception(s, e, bio, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001575 goto out_unlock;
1576 }
1577
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 /*
1579 * Write to snapshot - higher level takes care of RW/RO
1580 * flags so we should only get this if we are
1581 * writeable.
1582 */
1583 if (bio_rw(bio) == WRITE) {
Mikulas Patocka29138082009-04-02 19:55:25 +01001584 pe = __lookup_pending_exception(s, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001585 if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001586 up_write(&s->lock);
1587 pe = alloc_pending_exception(s);
1588 down_write(&s->lock);
1589
1590 if (!s->valid) {
1591 free_pending_exception(pe);
1592 r = -EIO;
1593 goto out_unlock;
1594 }
1595
Jon Brassow3510cb92009-12-10 23:52:11 +00001596 e = dm_lookup_exception(&s->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001597 if (e) {
1598 free_pending_exception(pe);
1599 remap_exception(s, e, bio, chunk);
1600 goto out_unlock;
1601 }
1602
Mikulas Patockac6621392009-04-02 19:55:25 +01001603 pe = __find_pending_exception(s, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001604 if (!pe) {
1605 __invalidate_snapshot(s, -ENOMEM);
1606 r = -EIO;
1607 goto out_unlock;
1608 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001609 }
1610
Milan Brozd74f81f2008-02-08 02:11:27 +00001611 remap_exception(s, &pe->e, bio, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001612 bio_list_add(&pe->snapshot_bios, bio);
1613
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001614 r = DM_MAPIO_SUBMITTED;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001615
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001616 if (!pe->started) {
1617 /* this is protected by snap->lock */
1618 pe->started = 1;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001619 up_write(&s->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001620 start_copy(pe);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001621 goto out;
1622 }
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001623 } else {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001624 bio->bi_bdev = s->origin->bdev;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001625 map_context->ptr = track_chunk(s, chunk);
1626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001628 out_unlock:
1629 up_write(&s->lock);
1630 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 return r;
1632}
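/*
 * Return values used above: DM_MAPIO_REMAPPED asks the dm core to
 * submit the (possibly redirected) bio itself, while DM_MAPIO_SUBMITTED
 * means this target now owns the bio; here it sits on
 * pe->snapshot_bios until the chunk copy completes and
 * pending_complete() releases it.
 */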
1633
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001634/*
1635 * A snapshot-merge target behaves like a combination of a snapshot
1636 * target and a snapshot-origin target. It only generates new
1637 * exceptions in other snapshots and not in the one that is being
1638 * merged.
1639 *
1640 * For each chunk, if there is an existing exception, it is used to
1641 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1642 * which in turn might generate exceptions in other snapshots.
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001643 * If merging is currently taking place on the chunk in question, the
1644 * I/O is deferred by adding it to s->bios_queued_during_merge.
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001645 */
1646static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1647 union map_info *map_context)
1648{
1649 struct dm_exception *e;
1650 struct dm_snapshot *s = ti->private;
1651 int r = DM_MAPIO_REMAPPED;
1652 chunk_t chunk;
1653
Mike Snitzer10b81062009-12-10 23:52:31 +00001654 if (unlikely(bio_empty_barrier(bio))) {
1655 if (!map_context->flush_request)
1656 bio->bi_bdev = s->origin->bdev;
1657 else
1658 bio->bi_bdev = s->cow->bdev;
1659 map_context->ptr = NULL;
1660 return DM_MAPIO_REMAPPED;
1661 }
1662
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001663 chunk = sector_to_chunk(s->store, bio->bi_sector);
1664
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001665 down_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001666
1667 /* Full snapshots are not usable */
1668 if (!s->valid) {
1669 r = -EIO;
1670 goto out_unlock;
1671 }
1672
1673 /* If the block is already remapped - use that */
1674 e = dm_lookup_exception(&s->complete, chunk);
1675 if (e) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001676 /* Queue writes overlapping with chunks being merged */
1677 if (bio_rw(bio) == WRITE &&
1678 chunk >= s->first_merging_chunk &&
1679 chunk < (s->first_merging_chunk +
1680 s->num_merging_chunks)) {
1681 bio->bi_bdev = s->origin->bdev;
1682 bio_list_add(&s->bios_queued_during_merge, bio);
1683 r = DM_MAPIO_SUBMITTED;
1684 goto out_unlock;
1685 }
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001686
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001687 remap_exception(s, e, bio, chunk);
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001688
1689 if (bio_rw(bio) == WRITE)
1690 map_context->ptr = track_chunk(s, chunk);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001691 goto out_unlock;
1692 }
1693
1694 bio->bi_bdev = s->origin->bdev;
1695
1696 if (bio_rw(bio) == WRITE) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001697 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001698 return do_origin(s->origin, bio);
1699 }
1700
1701out_unlock:
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001702 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001703
1704 return r;
1705}
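/*
 * Example of the deferral above: if chunks 100..107 are currently being
 * copied back to the origin (first_merging_chunk == 100,
 * num_merging_chunks == 8), a write to a remapped chunk inside that
 * window is parked on s->bios_queued_during_merge and is expected to be
 * resubmitted once that merge pass finishes; reads, and writes to
 * chunks outside the window, continue to use the existing exception in
 * the COW device.
 */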
1706
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001707static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1708 int error, union map_info *map_context)
1709{
1710 struct dm_snapshot *s = ti->private;
1711 struct dm_snap_tracked_chunk *c = map_context->ptr;
1712
1713 if (c)
1714 stop_tracking_chunk(s, c);
1715
1716 return 0;
1717}
1718
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001719static void snapshot_merge_presuspend(struct dm_target *ti)
1720{
1721 struct dm_snapshot *s = ti->private;
1722
1723 stop_merge(s);
1724}
1725
Mike Snitzerc26655c2009-12-10 23:52:12 +00001726static void snapshot_postsuspend(struct dm_target *ti)
1727{
1728 struct dm_snapshot *s = ti->private;
1729
1730 down_write(&s->lock);
1731 s->suspended = 1;
1732 up_write(&s->lock);
1733}
1734
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001735static int snapshot_preresume(struct dm_target *ti)
1736{
1737 int r = 0;
1738 struct dm_snapshot *s = ti->private;
1739 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1740
1741 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001742 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001743 if (snap_src && snap_dest) {
1744 down_read(&snap_src->lock);
1745 if (s == snap_src) {
1746 DMERR("Unable to resume snapshot source until "
1747 "handover completes.");
1748 r = -EINVAL;
1749 } else if (!snap_src->suspended) {
1750 DMERR("Unable to perform snapshot handover until "
1751 "source is suspended.");
1752 r = -EINVAL;
1753 }
1754 up_read(&snap_src->lock);
1755 }
1756 up_read(&_origins_lock);
1757
1758 return r;
1759}
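/*
 * Handover in brief: two snapshot targets sharing one COW device only
 * coexist while exceptions are being handed over from the old (source)
 * snapshot to its replacement.  snapshot_preresume() refuses to resume
 * the source at all, and refuses to resume the destination until the
 * source has been suspended; snapshot_resume() below then performs the
 * actual __handover_exceptions() with both snapshot locks held.
 */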
1760
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761static void snapshot_resume(struct dm_target *ti)
1762{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001763 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001764 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1765
1766 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001767 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001768 if (snap_src && snap_dest) {
1769 down_write(&snap_src->lock);
1770 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1771 __handover_exceptions(snap_src, snap_dest);
1772 up_write(&snap_dest->lock);
1773 up_write(&snap_src->lock);
1774 }
1775 up_read(&_origins_lock);
1776
1777 /* Now we have correct chunk size, reregister */
1778 reregister_snapshot(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001780 down_write(&s->lock);
1781 s->active = 1;
Mike Snitzerc26655c2009-12-10 23:52:12 +00001782 s->suspended = 0;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001783 up_write(&s->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784}
1785
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001786static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1787{
1788 sector_t min_chunksize;
1789
1790 down_read(&_origins_lock);
1791 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1792 up_read(&_origins_lock);
1793
1794 return min_chunksize;
1795}
1796
1797static void snapshot_merge_resume(struct dm_target *ti)
1798{
1799 struct dm_snapshot *s = ti->private;
1800
1801 /*
1802 * Handover exceptions from existing snapshot.
1803 */
1804 snapshot_resume(ti);
1805
1806 /*
1807 * snapshot-merge acts as an origin, so set ti->split_io
1808 */
1809 ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1810
1811 start_merge(s);
1812}
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814static int snapshot_status(struct dm_target *ti, status_type_t type,
1815 char *result, unsigned int maxlen)
1816{
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001817 unsigned sz = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001818 struct dm_snapshot *snap = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
1820 switch (type) {
1821 case STATUSTYPE_INFO:
Mikulas Patocka94e765722009-12-10 23:51:53 +00001822
1823 down_write(&snap->lock);
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 if (!snap->valid)
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001826 DMEMIT("Invalid");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 else {
Mike Snitzer985903b2009-12-10 23:52:11 +00001828 if (snap->store->type->usage) {
1829 sector_t total_sectors, sectors_allocated,
1830 metadata_sectors;
1831 snap->store->type->usage(snap->store,
1832 &total_sectors,
1833 &sectors_allocated,
1834 &metadata_sectors);
1835 DMEMIT("%llu/%llu %llu",
1836 (unsigned long long)sectors_allocated,
1837 (unsigned long long)total_sectors,
1838 (unsigned long long)metadata_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 }
1840 else
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001841 DMEMIT("Unknown");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 }
Mikulas Patocka94e765722009-12-10 23:51:53 +00001843
1844 up_write(&snap->lock);
1845
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 break;
1847
1848 case STATUSTYPE_TABLE:
1849 /*
1850 * Report the constructor parameters: the origin and COW
1851 * device names, followed by the exception store's own
1852 * table arguments.
1853 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001854 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
Jonathan Brassow1e302a92009-04-02 19:55:35 +01001855 snap->store->type->status(snap->store, type, result + sz,
1856 maxlen - sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 break;
1858 }
1859
1860 return 0;
1861}
1862
Mike Snitzer8811f462009-09-04 20:40:19 +01001863static int snapshot_iterate_devices(struct dm_target *ti,
1864 iterate_devices_callout_fn fn, void *data)
1865{
1866 struct dm_snapshot *snap = ti->private;
1867
1868 return fn(ti, snap->origin, 0, ti->len, data);
1869}
1870
1871
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872/*-----------------------------------------------------------------
1873 * Origin methods
1874 *---------------------------------------------------------------*/
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001875
1876/*
1877 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1878 * supplied bio is ignored. The caller may submit it immediately.
1879 * (No remapping actually occurs as the origin is always a direct linear
1880 * map.)
1881 *
1882 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1883 * and any supplied bio is added to a list to be submitted once all
1884 * the necessary exceptions exist.
1885 */
1886static int __origin_write(struct list_head *snapshots, sector_t sector,
1887 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888{
Mikulas Patocka515ad662009-12-10 23:52:30 +00001889 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 struct dm_snapshot *snap;
Jon Brassow1d4989c2009-12-10 23:52:10 +00001891 struct dm_exception *e;
Mikulas Patocka515ad662009-12-10 23:52:30 +00001892 struct dm_snap_pending_exception *pe;
1893 struct dm_snap_pending_exception *pe_to_start_now = NULL;
1894 struct dm_snap_pending_exception *pe_to_start_last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 chunk_t chunk;
1896
1897 /* Do all the snapshots on this origin */
1898 list_for_each_entry (snap, snapshots, list) {
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001899 /*
1900 * Don't make new exceptions in a merging snapshot
1901 * because it has effectively been deleted
1902 */
1903 if (dm_target_is_snapshot_merge(snap->ti))
1904 continue;
1905
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001906 down_write(&snap->lock);
1907
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001908 /* Only deal with valid and active snapshots */
1909 if (!snap->valid || !snap->active)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001910 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
Alasdair G Kergond5e404c2005-07-12 15:53:05 -07001912 /* Nothing to do if writing beyond end of snapshot */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001913 if (sector >= dm_table_get_size(snap->ti->table))
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001914 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916 /*
1917 * Remember, different snapshots can have
1918 * different chunk sizes.
1919 */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00001920 chunk = sector_to_chunk(snap->store, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
1922 /*
1923 * Check exception table to see if block
1924 * is already remapped in this snapshot
1925 * and trigger an exception if not.
1926 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001927 e = dm_lookup_exception(&snap->complete, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001928 if (e)
1929 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
Mikulas Patocka29138082009-04-02 19:55:25 +01001931 pe = __lookup_pending_exception(snap, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001932 if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001933 up_write(&snap->lock);
1934 pe = alloc_pending_exception(snap);
1935 down_write(&snap->lock);
1936
1937 if (!snap->valid) {
1938 free_pending_exception(pe);
1939 goto next_snapshot;
1940 }
1941
Jon Brassow3510cb92009-12-10 23:52:11 +00001942 e = dm_lookup_exception(&snap->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001943 if (e) {
1944 free_pending_exception(pe);
1945 goto next_snapshot;
1946 }
1947
Mikulas Patockac6621392009-04-02 19:55:25 +01001948 pe = __find_pending_exception(snap, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001949 if (!pe) {
1950 __invalidate_snapshot(snap, -ENOMEM);
1951 goto next_snapshot;
1952 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 }
1954
Mikulas Patocka515ad662009-12-10 23:52:30 +00001955 r = DM_MAPIO_SUBMITTED;
1956
1957 /*
1958 * If an origin bio was supplied, queue it to wait for the
1959 * completion of this exception, and start this one last,
1960 * at the end of the function.
1961 */
1962 if (bio) {
1963 bio_list_add(&pe->origin_bios, bio);
1964 bio = NULL;
1965
1966 if (!pe->started) {
1967 pe->started = 1;
1968 pe_to_start_last = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001969 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001970 }
1971
1972 if (!pe->started) {
1973 pe->started = 1;
Mikulas Patocka515ad662009-12-10 23:52:30 +00001974 pe_to_start_now = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001975 }
1976
1977 next_snapshot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 up_write(&snap->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Mikulas Patocka515ad662009-12-10 23:52:30 +00001980 if (pe_to_start_now) {
1981 start_copy(pe_to_start_now);
1982 pe_to_start_now = NULL;
1983 }
Alasdair G Kergonb4b610f2006-03-27 01:17:44 -08001984 }
1985
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 /*
Mikulas Patocka515ad662009-12-10 23:52:30 +00001987 * Submit the exception against which the bio is queued last,
1988 * to give the other exceptions a head start.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 */
Mikulas Patocka515ad662009-12-10 23:52:30 +00001990 if (pe_to_start_last)
1991 start_copy(pe_to_start_last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
1993 return r;
1994}
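/*
 * Ordering example for the above: a write hitting an origin with three
 * snapshots that all still need this chunk queues the bio on the first
 * snapshot's pending exception (pe_to_start_last).  The second and
 * third snapshots' copies are kicked off inside the loop as soon as
 * each snapshot's lock is dropped (pe_to_start_now); the first
 * snapshot's copy, the one the queued bio depends on, is started last
 * so the other copies get a head start.
 */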
1995
1996/*
1997 * Called on a write from the origin driver.
1998 */
1999static int do_origin(struct dm_dev *origin, struct bio *bio)
2000{
2001 struct origin *o;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002002 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004 down_read(&_origins_lock);
2005 o = __lookup_origin(origin->bdev);
2006 if (o)
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002007 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 up_read(&_origins_lock);
2009
2010 return r;
2011}
2012
2013/*
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002014 * Trigger exceptions in all non-merging snapshots.
2015 *
2016 * The chunk size of the merging snapshot may be larger than the chunk
2017 * size of some other snapshot so we may need to reallocate multiple
2018 * chunks in other snapshots.
2019 *
2020 * We scan all the overlapping exceptions in the other snapshots.
2021 * Returns 1 if anything was reallocated and must be waited for,
2022 * otherwise returns 0.
2023 *
2024 * size must be a multiple of merging_snap's chunk_size.
2025 */
2026static int origin_write_extent(struct dm_snapshot *merging_snap,
2027 sector_t sector, unsigned size)
2028{
2029 int must_wait = 0;
2030 sector_t n;
2031 struct origin *o;
2032
2033 /*
2034 * The origin's __minimum_chunk_size() got stored in split_io
2035 * by snapshot_merge_resume().
2036 */
2037 down_read(&_origins_lock);
2038 o = __lookup_origin(merging_snap->origin->bdev);
2039 for (n = 0; n < size; n += merging_snap->ti->split_io)
2040 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2041 DM_MAPIO_SUBMITTED)
2042 must_wait = 1;
2043 up_read(&_origins_lock);
2044
2045 return must_wait;
2046}
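/*
 * Worked example with illustrative sizes: the merging snapshot uses
 * 16-sector chunks but another snapshot of the same origin uses
 * 4-sector chunks, so snapshot_merge_resume() set ti->split_io to 4.
 * Merging one 16-sector chunk starting at sector 160 then calls
 * __origin_write() for sectors 160, 164, 168 and 172, making sure each
 * overlapping 4-sector chunk has an exception before the origin data
 * is overwritten.
 */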
2047
2048/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 * Origin: maps a linear range of a device, with hooks for snapshotting.
2050 */
2051
2052/*
2053 * Construct an origin mapping: <dev_path>
2054 * The context for an origin is merely a 'struct dm_dev *'
2055 * pointing to the real device.
2056 */
2057static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2058{
2059 int r;
2060 struct dm_dev *dev;
2061
2062 if (argc != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002063 ti->error = "origin: incorrect number of arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 return -EINVAL;
2065 }
2066
2067 r = dm_get_device(ti, argv[0], 0, ti->len,
2068 dm_table_get_mode(ti->table), &dev);
2069 if (r) {
2070 ti->error = "Cannot get target device";
2071 return r;
2072 }
2073
2074 ti->private = dev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002075 ti->num_flush_requests = 1;
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 return 0;
2078}
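/*
 * Example table line for this target (the device path and size are
 * illustrative):
 *
 *   0 8388608 snapshot-origin /dev/vg/base
 *
 * This maps the whole 4 GiB volume linearly while hooking writes so
 * that any snapshots of /dev/vg/base are kept consistent via
 * do_origin().
 */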
2079
2080static void origin_dtr(struct dm_target *ti)
2081{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002082 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 dm_put_device(ti, dev);
2084}
2085
2086static int origin_map(struct dm_target *ti, struct bio *bio,
2087 union map_info *map_context)
2088{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002089 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 bio->bi_bdev = dev->bdev;
2091
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002092 if (unlikely(bio_empty_barrier(bio)))
2093 return DM_MAPIO_REMAPPED;
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 /* Only tell snapshots if this is a write */
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002096 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097}
2098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099/*
2100 * Set the target "split_io" field to the minimum of all the snapshots'
2101 * chunk sizes.
2102 */
2103static void origin_resume(struct dm_target *ti)
2104{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002105 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002107 ti->split_io = get_origin_minimum_chunksize(dev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
2109
2110static int origin_status(struct dm_target *ti, status_type_t type, char *result,
2111 unsigned int maxlen)
2112{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002113 struct dm_dev *dev = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
2115 switch (type) {
2116 case STATUSTYPE_INFO:
2117 result[0] = '\0';
2118 break;
2119
2120 case STATUSTYPE_TABLE:
2121 snprintf(result, maxlen, "%s", dev->name);
2122 break;
2123 }
2124
2125 return 0;
2126}
2127
Mike Snitzer8811f462009-09-04 20:40:19 +01002128static int origin_iterate_devices(struct dm_target *ti,
2129 iterate_devices_callout_fn fn, void *data)
2130{
2131 struct dm_dev *dev = ti->private;
2132
2133 return fn(ti, dev, 0, ti->len, data);
2134}
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136static struct target_type origin_target = {
2137 .name = "snapshot-origin",
Mike Snitzer8811f462009-09-04 20:40:19 +01002138 .version = {1, 7, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 .module = THIS_MODULE,
2140 .ctr = origin_ctr,
2141 .dtr = origin_dtr,
2142 .map = origin_map,
2143 .resume = origin_resume,
2144 .status = origin_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002145 .iterate_devices = origin_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146};
2147
2148static struct target_type snapshot_target = {
2149 .name = "snapshot",
Mike Snitzerc26655c2009-12-10 23:52:12 +00002150 .version = {1, 9, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 .module = THIS_MODULE,
2152 .ctr = snapshot_ctr,
2153 .dtr = snapshot_dtr,
2154 .map = snapshot_map,
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002155 .end_io = snapshot_end_io,
Mike Snitzerc26655c2009-12-10 23:52:12 +00002156 .postsuspend = snapshot_postsuspend,
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002157 .preresume = snapshot_preresume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 .resume = snapshot_resume,
2159 .status = snapshot_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002160 .iterate_devices = snapshot_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161};
2162
Mikulas Patockad698aa42009-12-10 23:52:30 +00002163static struct target_type merge_target = {
2164 .name = dm_snapshot_merge_target_name,
2165 .version = {1, 0, 0},
2166 .module = THIS_MODULE,
2167 .ctr = snapshot_ctr,
2168 .dtr = snapshot_dtr,
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002169 .map = snapshot_merge_map,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002170 .end_io = snapshot_end_io,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002171 .presuspend = snapshot_merge_presuspend,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002172 .postsuspend = snapshot_postsuspend,
2173 .preresume = snapshot_preresume,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002174 .resume = snapshot_merge_resume,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002175 .status = snapshot_status,
2176 .iterate_devices = snapshot_iterate_devices,
2177};
2178
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179static int __init dm_snapshot_init(void)
2180{
2181 int r;
2182
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002183 r = dm_exception_store_init();
2184 if (r) {
2185 DMERR("Failed to initialize exception stores");
2186 return r;
2187 }
2188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 r = dm_register_target(&snapshot_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002190 if (r < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 DMERR("snapshot target register failed %d", r);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002192 goto bad_register_snapshot_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 }
2194
2195 r = dm_register_target(&origin_target);
2196 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002197 DMERR("Origin target register failed %d", r);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002198 goto bad_register_origin_target;
2199 }
2200
2201 r = dm_register_target(&merge_target);
2202 if (r < 0) {
2203 DMERR("Merge target register failed %d", r);
2204 goto bad_register_merge_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 }
2206
2207 r = init_origin_hash();
2208 if (r) {
2209 DMERR("init_origin_hash failed.");
Mikulas Patockad698aa42009-12-10 23:52:30 +00002210 goto bad_origin_hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 }
2212
Jon Brassow1d4989c2009-12-10 23:52:10 +00002213 exception_cache = KMEM_CACHE(dm_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 if (!exception_cache) {
2215 DMERR("Couldn't create exception cache.");
2216 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002217 goto bad_exception_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 }
2219
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002220 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 if (!pending_cache) {
2222 DMERR("Couldn't create pending cache.");
2223 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002224 goto bad_pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 }
2226
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002227 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2228 if (!tracked_chunk_cache) {
2229 DMERR("Couldn't create cache to track chunks in use.");
2230 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002231 goto bad_tracked_chunk_cache;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002232 }
2233
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002234 ksnapd = create_singlethread_workqueue("ksnapd");
2235 if (!ksnapd) {
2236 DMERR("Failed to create ksnapd workqueue.");
2237 r = -ENOMEM;
Mikulas Patocka92e86812008-07-21 12:00:35 +01002238 goto bad_pending_pool;
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002239 }
2240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 return 0;
2242
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002243bad_pending_pool:
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002244 kmem_cache_destroy(tracked_chunk_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002245bad_tracked_chunk_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 kmem_cache_destroy(pending_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002247bad_pending_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 kmem_cache_destroy(exception_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002249bad_exception_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 exit_origin_hash();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002251bad_origin_hash:
2252 dm_unregister_target(&merge_target);
2253bad_register_merge_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002255bad_register_origin_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 dm_unregister_target(&snapshot_target);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002257bad_register_snapshot_target:
2258 dm_exception_store_exit();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 return r;
2261}
2262
2263static void __exit dm_snapshot_exit(void)
2264{
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07002265 destroy_workqueue(ksnapd);
2266
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002267 dm_unregister_target(&snapshot_target);
2268 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002269 dm_unregister_target(&merge_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
2271 exit_origin_hash();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 kmem_cache_destroy(pending_cache);
2273 kmem_cache_destroy(exception_cache);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002274 kmem_cache_destroy(tracked_chunk_cache);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002275
2276 dm_exception_store_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277}
2278
2279/* Module hooks */
2280module_init(dm_snapshot_init);
2281module_exit(dm_snapshot_exit);
2282
2283MODULE_DESCRIPTION(DM_NAME " snapshot target");
2284MODULE_AUTHOR("Joe Thornber");
2285MODULE_LICENSE("GPL");