/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct target_io *)bio->bi_private)->info;
	return NULL;
}
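
/*
 * Illustrative sketch, not part of this file: a target's map method is
 * handed a pointer to the map_info above (see __map_bio() below) and may
 * stash per-io context in it for its end_io method to retrieve.  The
 * names my_map/my_state here are made up:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio,
 *			  union map_info *map_context)
 *	{
 *		map_context->ptr = my_state;
 *		return 1;	(dispatch the remapped bio)
 *	}
 */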

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;

static int __init local_init(void)
{
	int r;

	dm_set = bioset_create(16, 16, 4);
	if (!dm_set)
		return -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	bioset_free(dm_set);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}
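
/*
 * Note: end_io_acct() returns true when the last in-flight io drains;
 * dec_pending() below uses that to wake anyone sleeping on md->wait
 * in dm_suspend().
 */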

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
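
/*
 * Typical usage, as in dm_unplug_all() and friends below:
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... use the table ...
 *		dm_table_put(map);
 *	}
 */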

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}

static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
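
/*
 * Worked example of the split_io arithmetic above (split_io must be a
 * power of two for the mask to be valid): with split_io = 8 and
 * offset = 5, boundary = ((5 + 8) & ~7) - 5 = 8 - 5 = 3, so at most
 * 3 sectors may be issued before the next split_io boundary.
 */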

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	}

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, r);
		bio_put(clone);
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, dm_set);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
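
/*
 * __clone_and_map() below handles three cases:
 *
 * 1. The remaining io fits in the current target: issue one clone
 *    covering all remaining bvecs and finish.
 * 2. Several whole bvecs fit: clone as many complete bvecs as the
 *    target will take in one go.
 * 3. A single bvec straddles a target boundary: carve it up with
 *    split_bvec(), looking up the target again for each piece.
 */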

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}

static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r) {
		goto out;
	}

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	up(&_minor_lock);
	return r;
}
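
/*
 * Note the two-step idr pattern used above and below: idr_pre_get()
 * preallocates memory (and may sleep with GFP_KERNEL), then
 * idr_get_new()/idr_get_new_above() performs the actual allocation
 * under _minor_lock without needing to allocate itself.
 */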

static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	up(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}

static void free_dev(struct mapped_device *md)
{
	unsigned int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		thaw_bdev(md->suspended_bdev, NULL);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	free_minor(minor);
	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || (dm_disk(md)->first_minor != minor))
		md = NULL;

	up(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	if (atomic_dec_and_test(&md->holders)) {
		map = dm_get_table(md);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}
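
/*
 * Illustrative calling sequence for replacing a live table (the ioctl
 * interface drives this in practice):
 *
 *	dm_suspend(md, 1);		(flush in-flight io, lock fs)
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);			(replay deferred io)
 */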

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any in-
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	md->suspended_bdev = bdget_disk(md->disk, 0);
	if (!md->suspended_bdev) {
		DMWARN("bdget failed in dm_suspend");
		r = -ENOMEM;
		goto out;
	}

	/* Flush I/O to the device. */
	if (do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	dm_table_resume_targets(map);

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	bdput(md->suspended_bdev);
	md->suspended_bdev = NULL;

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
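
/*
 * Typical caller pattern (illustrative): sample the counter before
 * inspecting device state, then sleep until it moves, so no event
 * occurring between the two calls is missed:
 *
 *	event_nr = dm_get_event_nr(md);
 *	... examine device state ...
 *	dm_wait_event(md, event_nr);
 */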

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");