/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, clone;
        int error;
        union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;
        rwlock_t map_lock;
        atomic_t holders;
        atomic_t open_count;

        unsigned long flags;

        struct request_queue *queue;
        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * Processing queue (flush/barriers)
         */
        struct workqueue_struct *wq;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        struct bio_set *bs;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support requires holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *suspended_bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* sysfs handle */
        struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        if (!_major)
                _major = r;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}

static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_kcopyd_init,
        dm_interface_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);

out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md = disk->private_data;
        atomic_dec(&md->open_count);
        dm_put(md);
        return 0;
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md))
                r = -EBUSY;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *tgt;
        int r = -ENOTTY;

        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);

        if (dm_suspended(md)) {
                r = -EAGAIN;
                goto out;
        }

        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);

out:
        dm_table_put(map);

        return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        int cpu;

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending, cpu;
        int rw = bio_data_dir(bio);

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
        part_stat_unlock();

        dm_disk(md)->part0.in_flight = pending =
                atomic_dec_return(&md->pending);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        spin_lock_irq(&md->deferred_lock);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irq(&md->deferred_lock);

        if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
                queue_work(md->wq, &md->work);

        up_write(&md->io_lock);
}

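/*
 * Note on the flag above (derived from dm_request() and dm_wq_work()
 * below): once DMF_QUEUE_IO_TO_THREAD is set, bios arriving in
 * dm_request() are redirected to this deferred list via queue_io();
 * dm_wq_work() clears the flag again once the list has been drained.
 */
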
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;

        read_lock(&md->map_lock);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock(&md->map_lock);

        return t;
}

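/*
 * Illustrative sketch of the usage pattern the call sites in this
 * file follow (this is not an extra call site):
 *
 *      struct dm_table *map = dm_get_table(md);
 *
 *      if (map) {
 *              ... use the table ...
 *              dm_table_put(map);
 *      }
 */
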
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}

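/*
 * Worked example of the capacity check above (illustrative numbers
 * only): geo = { .cylinders = 1024, .heads = 255, .sectors = 63 }
 * gives sz = 1024 * 255 * 63 = 16450560 sectors, i.e. ~7.8 GiB with
 * 512-byte sectors, so any geo->start beyond that is rejected.
 */
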
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (error && !(io->error > 0 && __noflush_suspending(md)))
                io->error = error;

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                bio_list_add(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                end_io_acct(io);

                io_error = io->error;
                bio = io->bio;

                free_io(md, io);

                if (io_error != DM_ENDIO_REQUEUE) {
                        trace_block_bio_complete(md->queue, bio);

                        bio_endio(bio, io_error);
                }
        }
}

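/*
 * Reference-counting note, derived from the code around this file:
 * io->io_count starts at 1 in __split_and_process_bio() and
 * __map_bio() takes one further reference per clone, so the original
 * bio is completed here only after the initial reference has been
 * dropped and every clone has called dec_pending().
 */
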
static void clone_endio(struct bio *bio, int error)
{
        int r = 0;
        struct dm_target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        /*
                         * error and requeue request are handled
                         * in dec_pending().
                         */
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        return;
                else if (r) {
                        DMWARN("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        /*
         * Store md for cleanup instead of tio which is about to get freed.
         */
        bio->bi_private = md->bs;

        free_tio(md, tio);
        bio_put(bio);
        dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}

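/*
 * Worked example of the boundary arithmetic above (the mask only
 * works because split_io is a power of two): with split_io = 8 and
 * offset = 13, (13 + 8) & ~7 = 16, so boundary = 16 - 13 = 3 sectors
 * and the io is trimmed so it never crosses an 8-sector chunk
 * boundary.
 */
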
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct mapped_device *md;

        /*
         * Sanity checks.
         */
        BUG_ON(!clone->bi_size);

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone. If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */

                trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                  tio->io->bio->bi_bdev->bd_dev,
                                  clone->bi_sector, sector);

                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                md = tio->io->md;
                dec_pending(tio->io, r);
                /*
                 * Store bio_set for cleanup.
                 */
                clone->bi_private = md->bs;
                bio_put(clone);
                free_tio(md, tio);
        } else if (r) {
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
}

struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
        struct bio_set *bs = bio->bi_private;

        bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len, struct bio_set *bs)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
        }

        return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len, struct bio_set *bs)
{
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO);

                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
                                           bio_sector_offset(bio, idx, 0), len);
        }

        return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti;
        sector_t len = 0, max;
        struct dm_target_io *tio;

        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;

        max = max_io_len(ci->md, ci->sector, ti);

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci->md);
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                if (!dm_target_is_valid(ti))
                                        return -EIO;

                                max = max_io_len(ci->md, ci->sector, ti);

                                tio = alloc_tio(ci->md);
                                tio->io = ci->io;
                                tio->ti = ti;
                                memset(&tio->info, 0, sizeof(tio->info));
                        }

                        len = min(remaining, max);

                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }

        return 0;
}

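/*
 * Summary of the three cases above: (1) everything that remains fits
 * within the current target, so one clone covers it; (2) whole bvecs
 * fit, so a multi-bvec clone is issued up to the target boundary;
 * (3) a single bvec straddles a target boundary and is carved up with
 * split_bvec(), one piece per target.
 */
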
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;
        int error = 0;

        ci.map = dm_get_table(md);
        if (unlikely(!ci.map)) {
                bio_io_error(bio);
                return;
        }

        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        while (ci.sector_count && !error)
                error = __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, error);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
                         struct bvec_merge_data *bvm,
                         struct bio_vec *biovec)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *ti;
        sector_t max_sectors;
        int max_size = 0;

        if (unlikely(!map))
                goto out;

        ti = dm_table_find_target(map, bvm->bi_sector);
        if (!dm_target_is_valid(ti))
                goto out_table;

        /*
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
        if (max_size < 0)
                max_size = 0;

        /*
         * merge_bvec_fn() returns the number of bytes it can accept
         * at this offset; max_size is the precomputed maximal io size.
         */
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
        dm_table_put(map);

out:
        /*
         * Always allow an entire first page
         */
        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
                max_size = biovec->bv_len;

        return max_size;
}

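/*
 * Worked example of the size computation above (illustrative,
 * assuming 512-byte sectors): if max_io_len() allows 8 more sectors
 * and the bio already holds bvm->bi_size = 1024 bytes, then
 * max_size = (8 << 9) - 1024 = 3072 bytes may still be merged in.
 */
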
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
        int cpu;

        /*
         * There is no use in forwarding any barrier request since we can't
         * guarantee it is (or can be) handled by the targets correctly.
         */
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        down_read(&md->io_lock);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
        part_stat_unlock();

        /*
         * If we're suspended or the thread is processing barriers
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))) {
                up_read(&md->io_lock);

                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
                    bio_rw(bio) == READA) {
                        bio_io_error(bio);
                        return 0;
                }

                queue_io(md, bio);

                return 0;
        }

        __split_and_process_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r = bdi_bits;
        struct mapped_device *md = congested_data;
        struct dm_table *map;

        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                map = dm_get_table(md);
                if (map) {
                        r = dm_table_any_congested(map, bdi_bits);
                        dm_table_put(map);
                }
        }

        return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
        spin_lock(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
        if (r)
                goto out;

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
                goto out;
        }

out:
        spin_unlock(&_minor_lock);
        return r;
}

static int next_free_minor(int *minor)
{
        int r, m;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
        if (r)
                goto out;

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        spin_unlock(&_minor_lock);
        return r;
}

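/*
 * Both minor allocators above follow the old two-step IDR pattern:
 * idr_pre_get() preallocates memory outside the spinlock (it may
 * sleep with GFP_KERNEL), then idr_get_new()/idr_get_new_above()
 * inserts under _minor_lock.  MINOR_ALLOCED is stored as a
 * placeholder until alloc_dev() publishes the real md pointer with
 * idr_replace().
 */
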
1075static struct block_device_operations dm_blk_dops;
1076
Mikulas Patocka53d59142009-04-02 19:55:37 +01001077static void dm_wq_work(struct work_struct *work);
1078
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079/*
1080 * Allocate and initialise a blank device with a given minor.
1081 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001082static struct mapped_device *alloc_dev(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083{
1084 int r;
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001085 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001086 void *old_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
1088 if (!md) {
1089 DMWARN("unable to allocate device, out of memory.");
1090 return NULL;
1091 }
1092
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001093 if (!try_module_get(THIS_MODULE))
Milan Broz6ed7ade2008-02-08 02:10:19 +00001094 goto bad_module_get;
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001095
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096 /* get a minor number for the dev */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001097 if (minor == DM_ANY_MINOR)
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001098 r = next_free_minor(&minor);
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001099 else
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001100 r = specific_minor(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 if (r < 0)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001102 goto bad_minor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001104 init_rwsem(&md->io_lock);
Daniel Walkere61290a2008-02-08 02:10:08 +00001105 mutex_init(&md->suspend_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001106 spin_lock_init(&md->deferred_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 rwlock_init(&md->map_lock);
1108 atomic_set(&md->holders, 1);
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -07001109 atomic_set(&md->open_count, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 atomic_set(&md->event_nr, 0);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001111 atomic_set(&md->uevent_seq, 0);
1112 INIT_LIST_HEAD(&md->uevent_list);
1113 spin_lock_init(&md->uevent_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114
1115 md->queue = blk_alloc_queue(GFP_KERNEL);
1116 if (!md->queue)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001117 goto bad_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118
1119 md->queue->queuedata = md;
1120 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1121 md->queue->backing_dev_info.congested_data = md;
1122 blk_queue_make_request(md->queue, dm_request);
Mikulas Patocka99360b42009-04-02 19:55:39 +01001123 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
Jens Axboedaef2652006-01-10 10:48:02 +01001124 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 md->queue->unplug_fn = dm_unplug_all;
Milan Brozf6fccb12008-07-21 12:00:37 +01001126 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Matthew Dobson93d23412006-03-26 01:37:50 -08001128 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
Kiyoshi Ueda74859362006-12-08 02:41:02 -08001129 if (!md->io_pool)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001130 goto bad_io_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131
Matthew Dobson93d23412006-03-26 01:37:50 -08001132 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 if (!md->tio_pool)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001134 goto bad_tio_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
Jens Axboebb799ca2008-12-10 15:35:05 +01001136 md->bs = bioset_create(16, 0);
Stefan Bader9faf4002006-10-03 01:15:41 -07001137 if (!md->bs)
1138 goto bad_no_bioset;
1139
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 md->disk = alloc_disk(1);
1141 if (!md->disk)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001142 goto bad_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001144 atomic_set(&md->pending, 0);
1145 init_waitqueue_head(&md->wait);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001146 INIT_WORK(&md->work, dm_wq_work);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001147 init_waitqueue_head(&md->eventq);
1148
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 md->disk->major = _major;
1150 md->disk->first_minor = minor;
1151 md->disk->fops = &dm_blk_dops;
1152 md->disk->queue = md->queue;
1153 md->disk->private_data = md;
1154 sprintf(md->disk->disk_name, "dm-%d", minor);
1155 add_disk(md->disk);
Mike Anderson7e51f252006-03-27 01:17:52 -08001156 format_dev_t(md->name, MKDEV(_major, minor));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157
Milan Broz304f3f62008-02-08 02:11:17 +00001158 md->wq = create_singlethread_workqueue("kdmflush");
1159 if (!md->wq)
1160 goto bad_thread;
1161
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001162 /* Populate the mapping, nobody knows we exist yet */
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001163 spin_lock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001164 old_md = idr_replace(&_minor_idr, md, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001165 spin_unlock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001166
1167 BUG_ON(old_md != MINOR_ALLOCED);
1168
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 return md;
1170
Milan Broz304f3f62008-02-08 02:11:17 +00001171bad_thread:
1172 put_disk(md->disk);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001173bad_disk:
Stefan Bader9faf4002006-10-03 01:15:41 -07001174 bioset_free(md->bs);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001175bad_no_bioset:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 mempool_destroy(md->tio_pool);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001177bad_tio_pool:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 mempool_destroy(md->io_pool);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001179bad_io_pool:
Al Viro1312f402006-03-12 11:02:03 -05001180 blk_cleanup_queue(md->queue);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001181bad_queue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 free_minor(minor);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001183bad_minor:
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001184 module_put(THIS_MODULE);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001185bad_module_get:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 kfree(md);
1187 return NULL;
1188}
1189
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001190static void unlock_fs(struct mapped_device *md);
1191
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192static void free_dev(struct mapped_device *md)
1193{
Tejun Heof331c022008-09-03 09:01:48 +02001194 int minor = MINOR(disk_devt(md->disk));
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001195
Jun'ichi Nomurad9dde592006-02-24 13:04:24 -08001196 if (md->suspended_bdev) {
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001197 unlock_fs(md);
Jun'ichi Nomurad9dde592006-02-24 13:04:24 -08001198 bdput(md->suspended_bdev);
1199 }
Milan Broz304f3f62008-02-08 02:11:17 +00001200 destroy_workqueue(md->wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 mempool_destroy(md->tio_pool);
1202 mempool_destroy(md->io_pool);
Stefan Bader9faf4002006-10-03 01:15:41 -07001203 bioset_free(md->bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001204 blk_integrity_unregister(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 del_gendisk(md->disk);
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001206 free_minor(minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001207
1208 spin_lock(&_minor_lock);
1209 md->disk->private_data = NULL;
1210 spin_unlock(&_minor_lock);
1211
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 put_disk(md->disk);
Al Viro1312f402006-03-12 11:02:03 -05001213 blk_cleanup_queue(md->queue);
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001214 module_put(THIS_MODULE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 kfree(md);
1216}
1217
1218/*
1219 * Bind a table to the device.
1220 */
1221static void event_callback(void *context)
1222{
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001223 unsigned long flags;
1224 LIST_HEAD(uevents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 struct mapped_device *md = (struct mapped_device *) context;
1226
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001227 spin_lock_irqsave(&md->uevent_lock, flags);
1228 list_splice_init(&md->uevent_list, &uevents);
1229 spin_unlock_irqrestore(&md->uevent_lock, flags);
1230
Tejun Heoed9e1982008-08-25 19:56:05 +09001231 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001232
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 atomic_inc(&md->event_nr);
1234 wake_up(&md->eventq);
1235}
1236
Alasdair G Kergon4e901882005-07-28 21:15:59 -07001237static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238{
Alasdair G Kergon4e901882005-07-28 21:15:59 -07001239 set_capacity(md->disk, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08001241 mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001242 i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08001243 mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244}
1245
1246static int __bind(struct mapped_device *md, struct dm_table *t)
1247{
Jens Axboe165125e2007-07-24 09:28:11 +02001248 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 sector_t size;
1250
1251 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08001252
1253 /*
1254 * Wipe any geometry if the size of the table changed.
1255 */
1256 if (size != get_capacity(md->disk))
1257 memset(&md->geometry, 0, sizeof(md->geometry));
1258
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001259 if (md->suspended_bdev)
1260 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
Mikulas Patockad5816872009-01-06 03:05:10 +00001262 if (!size) {
1263 dm_table_destroy(t);
1264 return 0;
1265 }
1266
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001267 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001268
1269 write_lock(&md->map_lock);
1270 md->map = t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 dm_table_set_restrictions(t, q);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001272 write_unlock(&md->map_lock);
1273
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 return 0;
1275}
1276
1277static void __unbind(struct mapped_device *md)
1278{
1279 struct dm_table *map = md->map;
1280
1281 if (!map)
1282 return;
1283
1284 dm_table_event_callback(map, NULL, NULL);
1285 write_lock(&md->map_lock);
1286 md->map = NULL;
1287 write_unlock(&md->map_lock);
Mikulas Patockad5816872009-01-06 03:05:10 +00001288 dm_table_destroy(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289}
1290
1291/*
1292 * Constructor for a new device.
1293 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001294int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295{
1296 struct mapped_device *md;
1297
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001298 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 if (!md)
1300 return -ENXIO;
1301
Milan Broz784aae72009-01-06 03:05:12 +00001302 dm_sysfs_init(md);
1303
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 *result = md;
1305 return 0;
1306}
1307
David Teigland637842c2006-01-06 00:20:00 -08001308static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309{
1310 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 unsigned minor = MINOR(dev);
1312
1313 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1314 return NULL;
1315
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001316 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
1318 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001319 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02001320 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07001321 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08001322 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001323 goto out;
1324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001326out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001327 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
David Teigland637842c2006-01-06 00:20:00 -08001329 return md;
1330}
1331
David Teiglandd229a952006-01-06 00:20:01 -08001332struct mapped_device *dm_get_md(dev_t dev)
1333{
1334 struct mapped_device *md = dm_find_md(dev);
1335
1336 if (md)
1337 dm_get(md);
1338
1339 return md;
1340}
1341
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001342void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08001343{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001344 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345}
1346
1347void dm_set_mdptr(struct mapped_device *md, void *ptr)
1348{
1349 md->interface_ptr = ptr;
1350}
1351
1352void dm_get(struct mapped_device *md)
1353{
1354 atomic_inc(&md->holders);
1355}
1356
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001357const char *dm_device_name(struct mapped_device *md)
1358{
1359 return md->name;
1360}
1361EXPORT_SYMBOL_GPL(dm_device_name);
1362
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363void dm_put(struct mapped_device *md)
1364{
Mike Anderson1134e5a2006-03-27 01:17:54 -08001365 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001367 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1368
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001369 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
Mike Anderson1134e5a2006-03-27 01:17:54 -08001370 map = dm_get_table(md);
Tejun Heof331c022008-09-03 09:01:48 +02001371 idr_replace(&_minor_idr, MINOR_ALLOCED,
1372 MINOR(disk_devt(dm_disk(md))));
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001373 set_bit(DMF_FREEING, &md->flags);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001374 spin_unlock(&_minor_lock);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001375 if (!dm_suspended(md)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 dm_table_presuspend_targets(map);
1377 dm_table_postsuspend_targets(map);
1378 }
Milan Broz784aae72009-01-06 03:05:12 +00001379 dm_sysfs_exit(md);
Mike Anderson1134e5a2006-03-27 01:17:54 -08001380 dm_table_put(map);
Mikulas Patockaa1b51e92009-01-06 03:04:53 +00001381 __unbind(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 free_dev(md);
1383 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384}
Edward Goggin79eb8852007-05-09 02:32:56 -07001385EXPORT_SYMBOL_GPL(dm_put);
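/*
 * Note the ordering in dm_put() above: while _minor_lock is still held,
 * the idr slot is parked back at MINOR_ALLOCED and DMF_FREEING is set,
 * which prevents dm_find_md() from handing out a device whose last
 * reference has just dropped. Only after that are the targets suspended,
 * sysfs torn down, the table unbound and the device freed.
 */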
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Mikulas Patocka401600d2009-04-02 19:55:38 +01001387static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
Milan Broz46125c12008-02-08 02:10:30 +00001388{
1389 int r = 0;
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01001390 DECLARE_WAITQUEUE(wait, current);
1391
1392 dm_unplug_all(md->queue);
1393
1394 add_wait_queue(&md->wait, &wait);
Milan Broz46125c12008-02-08 02:10:30 +00001395
1396 while (1) {
Mikulas Patocka401600d2009-04-02 19:55:38 +01001397 set_current_state(interruptible);
Milan Broz46125c12008-02-08 02:10:30 +00001398
1399 smp_mb();
1400 if (!atomic_read(&md->pending))
1401 break;
1402
Mikulas Patocka401600d2009-04-02 19:55:38 +01001403 if (interruptible == TASK_INTERRUPTIBLE &&
1404 signal_pending(current)) {
Milan Broz46125c12008-02-08 02:10:30 +00001405 r = -EINTR;
1406 break;
1407 }
1408
1409 io_schedule();
1410 }
1411 set_current_state(TASK_RUNNING);
1412
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01001413 remove_wait_queue(&md->wait, &wait);
1414
Milan Broz46125c12008-02-08 02:10:30 +00001415 return r;
1416}
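/*
 * The loop above is an open-coded wait_event(): queue on md->wait, set
 * the task state, re-check md->pending, and sleep in io_schedule() until
 * the counter drains. The explicit smp_mb() keeps the read of md->pending
 * from slipping ahead of the state change, so a completion that
 * decrements the counter and calls wake_up() cannot go unseen. Signals
 * are honoured only for TASK_INTERRUPTIBLE callers.
 */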
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418/*
1419 * Process the deferred bios
1420 */
Mikulas Patockaef208582009-04-02 19:55:38 +01001421static void dm_wq_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422{
Mikulas Patockaef208582009-04-02 19:55:38 +01001423 struct mapped_device *md = container_of(work, struct mapped_device,
1424 work);
Milan Broz6d6f10d2008-02-08 02:10:22 +00001425 struct bio *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Mikulas Patockaef208582009-04-02 19:55:38 +01001427 down_write(&md->io_lock);
1428
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001429 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01001430 spin_lock_irq(&md->deferred_lock);
1431 c = bio_list_pop(&md->deferred);
1432 spin_unlock_irq(&md->deferred_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001433
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01001434 if (!c) {
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001435 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01001436 break;
1437 }
1438
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001439 up_write(&md->io_lock);
1440
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001441 __split_and_process_bio(md, c);
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001442
1443 down_write(&md->io_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001444 }
Milan Broz73d410c2008-02-08 02:10:25 +00001445
Mikulas Patockaef208582009-04-02 19:55:38 +01001446 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447}
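/*
 * dm_wq_work() deliberately drops md->io_lock around each call to
 * __split_and_process_bio(): processing a bio can block, and holding the
 * write lock there would stall dm_request(), which takes the same lock
 * for reading on the submission path. DMF_QUEUE_IO_TO_THREAD is cleared
 * only once md->deferred is empty, after which dm_request() may again
 * process bios directly instead of deferring them to this thread.
 */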
1448
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01001449static void dm_queue_flush(struct mapped_device *md)
Milan Broz304f3f62008-02-08 02:11:17 +00001450{
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001451 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1452 smp_mb__after_clear_bit();
Mikulas Patocka53d59142009-04-02 19:55:37 +01001453 queue_work(md->wq, &md->work);
Milan Broz304f3f62008-02-08 02:11:17 +00001454}
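/*
 * The smp_mb__after_clear_bit() in dm_queue_flush() orders the clearing
 * of DMF_BLOCK_IO_FOR_SUSPEND before the queue_work() that follows, so
 * the worker re-checking the flag at the top of dm_wq_work()'s loop is
 * guaranteed to see it clear and drain md->deferred.
 */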
1455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456/*
1457 * Swap in a new table (destroying old one).
1458 */
1459int dm_swap_table(struct mapped_device *md, struct dm_table *table)
1460{
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07001461 int r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Daniel Walkere61290a2008-02-08 02:10:08 +00001463 mutex_lock(&md->suspend_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
1465 /* device must be suspended */
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001466 if (!dm_suspended(md))
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07001467 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001469 /* without bdev, the device size cannot be changed */
1470 if (!md->suspended_bdev)
1471 if (get_capacity(md->disk) != dm_table_get_size(table))
1472 goto out;
1473
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 __unbind(md);
1475 r = __bind(md, table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07001477out:
Daniel Walkere61290a2008-02-08 02:10:08 +00001478 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07001479 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480}
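/*
 * A sketch of the sequence a caller (in practice the ioctl layer) follows
 * to replace a table; illustrative only, with error handling trimmed:
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r)
 *		r = dm_swap_table(md, new_table);
 *	if (!r)
 *		r = dm_resume(md);
 *
 * dm_swap_table() refuses to run on a live device, hence the
 * dm_suspended() check at its top, and without a suspended_bdev it also
 * refuses any table that would change the device size.
 */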
1481
1482/*
1483 * Functions to lock and unlock any filesystem running on the
1484 * device.
1485 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001486static int lock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001488 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489
1490 WARN_ON(md->frozen_sb);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07001491
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001492 md->frozen_sb = freeze_bdev(md->suspended_bdev);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07001493 if (IS_ERR(md->frozen_sb)) {
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001494 r = PTR_ERR(md->frozen_sb);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001495 md->frozen_sb = NULL;
1496 return r;
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07001497 }
1498
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08001499 set_bit(DMF_FROZEN, &md->flags);
1500
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 /* don't bdput right now, we don't want the bdev
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001502 * to go away while it is locked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 */
1504 return 0;
1505}
1506
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001507static void unlock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508{
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08001509 if (!test_bit(DMF_FROZEN, &md->flags))
1510 return;
1511
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001512 thaw_bdev(md->suspended_bdev, md->frozen_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 md->frozen_sb = NULL;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08001514 clear_bit(DMF_FROZEN, &md->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515}
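/*
 * DMF_FROZEN records whether lock_fs() actually froze the filesystem.
 * That keeps unlock_fs() a safe no-op on paths that never locked: a
 * noflush suspend skips lock_fs() entirely, yet both the error path of
 * dm_suspend() and dm_resume() call unlock_fs() unconditionally.
 */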
1516
1517/*
1518 * We need to be able to change a mapping table under a mounted
1519 * filesystem. For example we might want to move some data in
1520 * the background. Before the table can be swapped with
1521 * dm_bind_table, dm_suspend must be called to flush any in
1522 * flight bios and ensure that any further io gets deferred.
1523 */
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08001524int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525{
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001526 struct dm_table *map = NULL;
Milan Broz46125c12008-02-08 02:10:30 +00001527 int r = 0;
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08001528 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001529 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
Daniel Walkere61290a2008-02-08 02:10:08 +00001531 mutex_lock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001532
Milan Broz73d410c2008-02-08 02:10:25 +00001533 if (dm_suspended(md)) {
1534 r = -EINVAL;
Alasdair G Kergond2874832006-11-08 17:44:43 -08001535 goto out_unlock;
Milan Broz73d410c2008-02-08 02:10:25 +00001536 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538 map = dm_get_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001540 /*
1541 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
1542 * This flag is cleared before dm_suspend returns.
1543 */
1544 if (noflush)
1545 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1546
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001547 /* This does not get reverted if there's an error later. */
1548 dm_table_presuspend_targets(map);
1549
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001550 /* bdget() can stall if the pending I/Os are not flushed */
1551 if (!noflush) {
1552 md->suspended_bdev = bdget_disk(md->disk, 0);
1553 if (!md->suspended_bdev) {
1554 DMWARN("bdget failed in dm_suspend");
1555 r = -ENOMEM;
Kiyoshi Uedaf431d962008-10-21 17:45:07 +01001556 goto out;
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001557 }
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001558
Milan Broz6d6f10d2008-02-08 02:10:22 +00001559 /*
1560 * Flush I/O to the device. noflush supersedes do_lockfs,
1561 * because lock_fs() needs to flush I/Os.
1562 */
1563 if (do_lockfs) {
1564 r = lock_fs(md);
1565 if (r)
1566 goto out;
1567 }
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08001568 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001571 * Here we must make sure that no processes are submitting requests
1572 * to target drivers i.e. no one may be executing
1573 * __split_and_process_bio. This is called from dm_request and
1574 * dm_wq_work.
1575 *
1576 * To get all processes out of __split_and_process_bio in dm_request,
1577 * we take the write lock. To prevent any process from reentering
1578 * __split_and_process_bio from dm_request, we set
1579 * DMF_QUEUE_IO_TO_THREAD.
1580 *
1581 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
1582 * and call flush_workqueue(md->wq). flush_workqueue will wait until
1583 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
1584 * further calls to __split_and_process_bio from dm_wq_work.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001586 down_write(&md->io_lock);
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001587 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1588 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001589 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001591 flush_workqueue(md->wq);
1592
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001594 * At this point no more requests are entering target request routines.
1595 * We call dm_wait_for_completion to wait for all existing requests
1596 * to finish.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 */
Mikulas Patocka401600d2009-04-02 19:55:38 +01001598 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001600 down_write(&md->io_lock);
Milan Broz6d6f10d2008-02-08 02:10:22 +00001601 if (noflush)
Mikulas Patocka022c2612009-04-02 19:55:39 +01001602 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
Milan Broz94d63512008-02-08 02:10:27 +00001603 up_write(&md->io_lock);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001604
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 /* were we interrupted? */
Milan Broz46125c12008-02-08 02:10:30 +00001606 if (r < 0) {
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01001607 dm_queue_flush(md);
Milan Broz73d410c2008-02-08 02:10:25 +00001608
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001609 unlock_fs(md);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001610 goto out; /* pushback list is already flushed, so skip flush */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001611 }
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001612
Mikulas Patocka3b00b202009-04-09 00:27:15 +01001613 /*
1614 * If dm_wait_for_completion returned 0, the device is completely
1615 * quiescent now. There is no request-processing activity. All new
1616 * requests are being added to md->deferred list.
1617 */
1618
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001619 dm_table_postsuspend_targets(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
1621 set_bit(DMF_SUSPENDED, &md->flags);
1622
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001623out:
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001624 if (r && md->suspended_bdev) {
1625 bdput(md->suspended_bdev);
1626 md->suspended_bdev = NULL;
1627 }
1628
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 dm_table_put(map);
Alasdair G Kergond2874832006-11-08 17:44:43 -08001630
1631out_unlock:
Daniel Walkere61290a2008-02-08 02:10:08 +00001632 mutex_unlock(&md->suspend_lock);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001633 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634}
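/*
 * Suspend in short: raise DMF_BLOCK_IO_FOR_SUSPEND and
 * DMF_QUEUE_IO_TO_THREAD so every new bio is deferred, flush md->wq so
 * dm_wq_work() has quiesced, then wait for the in-flight counter to
 * drain before setting DMF_SUSPENDED. An interrupted wait unwinds
 * completely: deferred bios are requeued via dm_queue_flush() and the
 * filesystem is thawed again.
 */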
1635
1636int dm_resume(struct mapped_device *md)
1637{
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001638 int r = -EINVAL;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001639 struct dm_table *map = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Daniel Walkere61290a2008-02-08 02:10:08 +00001641 mutex_lock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001642 if (!dm_suspended(md))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001643 goto out;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001644
1645 map = dm_get_table(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001646 if (!map || !dm_table_get_size(map))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001647 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Milan Broz8757b772006-10-03 01:15:36 -07001649 r = dm_table_resume_targets(map);
1650 if (r)
1651 goto out;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001652
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01001653 dm_queue_flush(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001654
1655 unlock_fs(md);
1656
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001657 if (md->suspended_bdev) {
1658 bdput(md->suspended_bdev);
1659 md->suspended_bdev = NULL;
1660 }
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001661
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001662 clear_bit(DMF_SUSPENDED, &md->flags);
1663
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 dm_table_unplug_all(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665
Alasdair G Kergon69267a32007-12-13 14:15:57 +00001666 dm_kobject_uevent(md);
Hannes Reinecke8560ed62006-10-03 01:15:35 -07001667
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001668 r = 0;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001669
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001670out:
1671 dm_table_put(map);
Daniel Walkere61290a2008-02-08 02:10:08 +00001672 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001673
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001674 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675}
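/*
 * Resume mirrors suspend in reverse: resume the targets, requeue the
 * deferred bios through dm_queue_flush(), thaw the filesystem, drop the
 * suspended_bdev reference, clear DMF_SUSPENDED and finally announce the
 * change to userspace with a KOBJ_CHANGE uevent.
 */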
1676
1677/*-----------------------------------------------------------------
1678 * Event notification.
1679 *---------------------------------------------------------------*/
Alasdair G Kergon69267a32007-12-13 14:15:57 +00001680void dm_kobject_uevent(struct mapped_device *md)
1681{
Tejun Heoed9e1982008-08-25 19:56:05 +09001682 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
Alasdair G Kergon69267a32007-12-13 14:15:57 +00001683}
1684
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001685uint32_t dm_next_uevent_seq(struct mapped_device *md)
1686{
1687 return atomic_add_return(1, &md->uevent_seq);
1688}
1689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690uint32_t dm_get_event_nr(struct mapped_device *md)
1691{
1692 return atomic_read(&md->event_nr);
1693}
1694
1695int dm_wait_event(struct mapped_device *md, int event_nr)
1696{
1697 return wait_event_interruptible(md->eventq,
1698 (event_nr != atomic_read(&md->event_nr)));
1699}
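/*
 * A caller sketch for the event counter (illustrative only): snapshot
 * the counter, report it, then block until it moves.
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	(hand ev to userspace, then on the next wait request...)
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *
 * dm_wait_event() returns non-zero only when the sleep is interrupted;
 * otherwise md->event_nr has advanced past the snapshot.
 */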
1700
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001701void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
1702{
1703 unsigned long flags;
1704
1705 spin_lock_irqsave(&md->uevent_lock, flags);
1706 list_add(elist, &md->uevent_list);
1707 spin_unlock_irqrestore(&md->uevent_lock, flags);
1708}
1709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710/*
1711 * The gendisk is only valid as long as you have a reference
1712 * count on 'md'.
1713 */
1714struct gendisk *dm_disk(struct mapped_device *md)
1715{
1716 return md->disk;
1717}
1718
Milan Broz784aae72009-01-06 03:05:12 +00001719struct kobject *dm_kobject(struct mapped_device *md)
1720{
1721 return &md->kobj;
1722}
1723
1724/*
1725 * struct mapped_device should not be exported outside of dm.c
1726 * so use this check to verify that kobj is part of md structure
1727 */
1728struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1729{
1730 struct mapped_device *md;
1731
1732 md = container_of(kobj, struct mapped_device, kobj);
1733 if (&md->kobj != kobj)
1734 return NULL;
1735
1736 dm_get(md);
1737 return md;
1738}
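/*
 * After md = container_of(kobj, ...), &md->kobj == kobj holds by
 * construction, so the comparison above cannot actually fail; the real
 * safety comes from this helper only ever being handed kobjects that dm
 * itself registered via dm_sysfs_init(). A successful lookup takes a
 * reference, which callers drop with dm_put().
 */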
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740int dm_suspended(struct mapped_device *md)
1741{
1742 return test_bit(DMF_SUSPENDED, &md->flags);
1743}
1744
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001745int dm_noflush_suspending(struct dm_target *ti)
1746{
1747 struct mapped_device *md = dm_table_get_md(ti->table);
1748 int r = __noflush_suspending(md);
1749
1750 dm_put(md);
1751
1752 return r;
1753}
1754EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756static struct block_device_operations dm_blk_dops = {
1757 .open = dm_blk_open,
1758 .release = dm_blk_close,
Milan Brozaa129a22006-10-03 01:15:15 -07001759 .ioctl = dm_blk_ioctl,
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08001760 .getgeo = dm_blk_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 .owner = THIS_MODULE
1762};
1763
1764EXPORT_SYMBOL(dm_get_mapinfo);
1765
1766/*
1767 * module hooks
1768 */
1769module_init(dm_init);
1770module_exit(dm_exit);
1771
1772module_param(major, uint, 0);
1773MODULE_PARM_DESC(major, "The major number of the device mapper");
1774MODULE_DESCRIPTION(DM_NAME " driver");
1775MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1776MODULE_LICENSE("GPL");