/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
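
/*
 * Illustrative note (a sketch of how the cookie is consumed, based on the
 * uevent formatting done elsewhere in this file): the cookie reaches
 * userspace as a udev environment variable of the form
 *
 *      DM_COOKIE=1234567890
 *
 * i.e. DM_COOKIE_ENV_VAR_NAME, '=', and the decimal value of a 32-bit
 * cookie, which is why DM_COOKIE_LENGTH of 24 bytes is sufficient.
 */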

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
        spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, clone;
        int error;
        union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
        if (rq && rq->end_io_data)
                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
        return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
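
/*
 * Illustrative sketch (assumed caller, not from this file): a
 * request-based target can recover its per-request map_info from a
 * clone in its completion path, e.g.
 *
 *      union map_info *info = dm_get_rq_mapinfo(clone);
 *      struct my_io_ctx *ctx = info ? info->ptr : NULL;
 *
 * where my_io_ctx and the use of info->ptr are hypothetical.
 */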
101
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -0700102#define MINOR_ALLOCED ((void *)-1)
103
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104/*
105 * Bits for the md->flags field.
106 */
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +0100107#define DMF_BLOCK_IO_FOR_SUSPEND 0
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#define DMF_SUSPENDED 1
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -0800109#define DMF_FROZEN 2
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700110#define DMF_FREEING 3
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700111#define DMF_DELETING 4
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800112#define DMF_NOFLUSH_SUSPENDING 5
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
Milan Broz304f3f62008-02-08 02:11:17 +0000114/*
115 * Work processed by per-device workqueue.
116 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117struct mapped_device {
Alasdair G Kergon2ca33102005-07-28 21:16:00 -0700118 struct rw_semaphore io_lock;
Daniel Walkere61290a2008-02-08 02:10:08 +0000119 struct mutex suspend_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 rwlock_t map_lock;
121 atomic_t holders;
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700122 atomic_t open_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123
124 unsigned long flags;
125
Jens Axboe165125e2007-07-24 09:28:11 +0200126 struct request_queue *queue;
Mike Snitzera5664da2010-08-12 04:14:01 +0100127 unsigned type;
Mike Snitzer4a0b4dd2010-08-12 04:14:02 +0100128 /* Protect queue and type against concurrent access. */
Mike Snitzera5664da2010-08-12 04:14:01 +0100129 struct mutex type_lock;
130
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131 struct gendisk *disk;
Mike Anderson7e51f252006-03-27 01:17:52 -0800132 char name[16];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133
134 void *interface_ptr;
135
136 /*
137 * A list of ios that arrived while we were suspended.
138 */
Nikanth Karthikesan316d3152009-10-06 20:16:55 +0200139 atomic_t pending[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140 wait_queue_head_t wait;
Mikulas Patocka53d59142009-04-02 19:55:37 +0100141 struct work_struct work;
Kiyoshi Ueda74859362006-12-08 02:41:02 -0800142 struct bio_list deferred;
Mikulas Patocka022c2612009-04-02 19:55:39 +0100143 spinlock_t deferred_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
145 /*
Tejun Heo29e40132010-09-08 18:07:00 +0200146 * Processing queue (flush)
Milan Broz304f3f62008-02-08 02:11:17 +0000147 */
148 struct workqueue_struct *wq;
149
150 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 * The current mapping.
152 */
153 struct dm_table *map;
154
155 /*
156 * io objects are allocated from here.
157 */
158 mempool_t *io_pool;
159 mempool_t *tio_pool;
160
Stefan Bader9faf4002006-10-03 01:15:41 -0700161 struct bio_set *bs;
162
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 /*
164 * Event handling.
165 */
166 atomic_t event_nr;
167 wait_queue_head_t eventq;
Mike Anderson7a8c3d32007-10-19 22:48:01 +0100168 atomic_t uevent_seq;
169 struct list_head uevent_list;
170 spinlock_t uevent_lock; /* Protect access to uevent_list */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171
172 /*
173 * freeze/thaw support require holding onto a super block
174 */
175 struct super_block *frozen_sb;
Mikulas Patockadb8fef42009-06-22 10:12:15 +0100176 struct block_device *bdev;
Darrick J. Wong3ac51e72006-03-27 01:17:54 -0800177
178 /* forced geometry settings */
179 struct hd_geometry geometry;
Milan Broz784aae72009-01-06 03:05:12 +0000180
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100181 /* For saving the address of __make_request for request based dm */
182 make_request_fn *saved_make_request_fn;
183
Milan Broz784aae72009-01-06 03:05:12 +0000184 /* sysfs handle */
185 struct kobject kobj;
Mikulas Patocka52b1fd52009-06-22 10:12:21 +0100186
Tejun Heod87f4c12010-09-03 11:56:19 +0200187 /* zero-length flush that will be cloned and submitted to targets */
188 struct bio flush_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
        mempool_t *io_pool;
        mempool_t *tio_pool;
        struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        if (!_major)
                _major = r;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}

static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

      bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
        return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        lock_kernel();
        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);

out:
        spin_unlock(&_minor_lock);
        unlock_kernel();

        return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md = disk->private_data;

        lock_kernel();
        atomic_dec(&md->open_count);
        dm_put(md);
        unlock_kernel();

        return 0;
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md))
                r = -EBUSY;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}
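
/*
 * Illustrative sketch (assumed caller, not from this file): the device
 * removal path would typically do
 *
 *      if (dm_lock_for_deletion(md))
 *              return -EBUSY;          (device is still open somewhere)
 *
 * after which dm_blk_open() refuses new opens because dm_deleting_md()
 * is now true.
 */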

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_live_table(md);
        struct dm_target *tgt;
        int r = -ENOTTY;

        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);

        if (dm_suspended_md(md)) {
                r = -EAGAIN;
                goto out;
        }

        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);

out:
        dm_table_put(map);

        return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
                                            gfp_t gfp_mask)
{
        return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
        mempool_free(info, info->tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
        return atomic_read(&md->pending[READ]) +
               atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        int cpu;
        int rw = bio_data_dir(io->bio);

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending, cpu;
        int rw = bio_data_dir(bio);

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
        part_stat_unlock();

        /*
         * After this is decremented the bio must not be touched if it is
         * a flush.
         */
        dm_disk(md)->part0.in_flight[rw] = pending =
                atomic_dec_return(&md->pending[rw]);
        pending += atomic_read(&md->pending[rw^0x1]);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
        struct dm_table *t;
        unsigned long flags;

        read_lock_irqsave(&md->map_lock, flags);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock_irqrestore(&md->map_lock, flags);

        return t;
}
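
/*
 * Illustrative sketch of the required get/put pairing (assumed caller):
 *
 *      struct dm_table *map = dm_get_live_table(md);
 *
 *      if (map) {
 *              ... use the table ...
 *              dm_table_put(map);
 *      }
 */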

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}
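
/*
 * Illustrative sketch (hypothetical values): a caller forcing a CHS
 * geometry fills in struct hd_geometry first, e.g.
 *
 *      struct hd_geometry geo = {
 *              .heads = 255, .sectors = 63, .cylinders = 1024, .start = 0,
 *      };
 *      r = dm_set_geometry(md, &geo);
 *
 * The start sector must lie within heads * sectors * cylinders or
 * -EINVAL is returned.
 */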

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
                if (!(io->error > 0 && __noflush_suspending(md)))
                        io->error = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                bio_list_add_head(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->error;
                bio = io->bio;
                end_io_acct(io);
                free_io(md, io);

                if (io_error == DM_ENDIO_REQUEUE)
                        return;

                if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_FLUSH.
                         */
                        bio->bi_rw &= ~REQ_FLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
                        trace_block_bio_complete(md->queue, bio);
                        bio_endio(bio, io_error);
                }
        }
}

static void clone_endio(struct bio *bio, int error)
{
        int r = 0;
        struct dm_target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        /*
                         * error and requeue request are handled
                         * in dec_pending().
                         */
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        return;
                else if (r) {
                        DMWARN("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        /*
         * Store md for cleanup instead of tio which is about to get freed.
         */
        bio->bi_private = md->bs;

        free_tio(md, tio);
        bio_put(bio);
        dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
        struct dm_rq_clone_bio_info *info = clone->bi_private;
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_size;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something is wrong.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        if (run_queue)
                blk_run_queue(md->queue);

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        blk_rq_unprep_clone(clone);
        free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                if (rq->sense)
                        /*
                         * We are using the sense buffer of the original
                         * request.
                         * So setting the length of the sense data is enough.
                         */
                        rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);
        blk_end_request_all(rq, error);
        rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
        struct request *clone = rq->special;

        rq->special = NULL;
        rq->cmd_flags &= ~REQ_DONTPREP;

        free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request_queue *q = rq->q;
        unsigned long flags;

        dm_unprep_request(rq);

        spin_lock_irqsave(q->queue_lock, flags);
        if (elv_queue_empty(q))
                blk_plug_device(q);
        blk_requeue_request(q, rq);
        spin_unlock_irqrestore(q->queue_lock, flags);

        rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
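
/*
 * Illustrative note: dm core itself uses this when a target asks for a
 * retry, e.g. when rq_end_io() returns DM_ENDIO_REQUEUE (see dm_done()
 * below); the original request is unprepped and fed back to its queue
 * without being reported as completed to the upper layer.
 */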

static void __stop_queue(struct request_queue *q)
{
        blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
        if (blk_queue_stopped(q))
                blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

        if (mapped && rq_end_io)
                r = rq_end_io(tio->ti, clone, error, &tio->info);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_unmapped_request(clone);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct request *clone = rq->completion_data;
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (rq->cmd_flags & REQ_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct request *rq = tio->orig;

        tio->error = error;
        rq->completion_data = clone;
        blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct request *rq = tio->orig;

        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
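
/*
 * Illustrative sketch (roughly what the request dispatch path later in
 * this file does when a target's map_rq() reports failure):
 *
 *      r = ti->type->map_rq(ti, clone, &tio->info);
 *      if (r < 0)
 *              dm_kill_unmapped_request(clone, r);
 */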

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
        /*
         * This just cleans up the information of the queue in which
         * the clone was dispatched.
         * The clone is *NOT* actually freed here because it is allocated
         * from dm's own mempool and REQ_ALLOCED isn't set in
         * clone->cmd_flags.
         */
        __blk_put_request(clone->q, clone);

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the queue lock. Otherwise, deadlock could occur because:
         *   - another request may be submitted by the upper level driver
         *     of the stacking during the completion
         *   - the submission which requires queue lock may be done
         *     against this queue
         */
        dm_complete_request(clone, error);
}
941
Mike Snitzer56a67df2010-08-12 04:14:10 +0100942/*
943 * Return maximum size of I/O possible at the supplied sector up to the current
944 * target boundary.
945 */
946static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947{
Mike Snitzer56a67df2010-08-12 04:14:10 +0100948 sector_t target_offset = dm_target_offset(ti, sector);
949
950 return ti->len - target_offset;
951}
952
953static sector_t max_io_len(sector_t sector, struct dm_target *ti)
954{
955 sector_t len = max_io_len_target_boundary(sector, ti);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956
957 /*
958 * Does the target need to split even further ?
959 */
960 if (ti->split_io) {
961 sector_t boundary;
Mike Snitzer56a67df2010-08-12 04:14:10 +0100962 sector_t offset = dm_target_offset(ti, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
964 - offset;
965 if (len > boundary)
966 len = boundary;
967 }
968
969 return len;
970}
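
/*
 * Worked example for the split_io clipping above (illustrative):
 * with ti->split_io = 8 (a power of two) and offset = 13, the next
 * chunk boundary is
 *
 *      ((13 + 8) & ~7) - 13 = 16 - 13 = 3
 *
 * so an I/O starting at that offset is clipped to 3 sectors to avoid
 * straddling the boundary at sector 16 of the target.
 */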

static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct mapped_device *md;

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone. If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */

                trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                  tio->io->bio->bi_bdev->bd_dev, sector);

                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                md = tio->io->md;
                dec_pending(tio->io, r);
                /*
                 * Store bio_set for cleanup.
                 */
                clone->bi_private = md->bs;
                bio_put(clone);
                free_tio(md, tio);
        } else if (r) {
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
}

struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
        struct bio_set *bs = bio->bi_private;

        bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len, struct bio_set *bs)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
        }

        return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len, struct bio_set *bs)
{
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO, bs);

                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
                                           bio_sector_offset(bio, idx, 0), len);
        }

        return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
                                      struct dm_target *ti)
{
        struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        return tio;
}

static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
                                   unsigned request_nr, sector_t len)
{
        struct dm_target_io *tio = alloc_tio(ci, ti);
        struct bio *clone;

        tio->info.target_request_nr = request_nr;

        /*
         * Discard requests require the bio's inline iovecs be initialized.
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
        clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
        __bio_clone(clone, ci->bio);
        clone->bi_destructor = dm_bio_destructor;
        if (len) {
                clone->bi_sector = ci->sector;
                clone->bi_size = to_bytes(len);
        }

        __map_bio(ti, clone, tio);
}

static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
                                    unsigned num_requests, sector_t len)
{
        unsigned request_nr;

        for (request_nr = 0; request_nr < num_requests; request_nr++)
                __issue_target_request(ci, ti, request_nr, len);
}

static int __clone_and_map_empty_flush(struct clone_info *ci)
{
        unsigned target_nr = 0;
        struct dm_target *ti;

        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __issue_target_requests(ci, ti, ti->num_flush_requests, 0);

        return 0;
}

/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target_io *tio;

        tio = alloc_tio(ci, ti);
        clone = clone_bio(bio, ci->sector, ci->idx,
                          bio->bi_vcnt - ci->idx, ci->sector_count,
                          ci->md->bs);
        __map_bio(ti, clone, tio);
        ci->sector_count = 0;
}

static int __clone_and_map_discard(struct clone_info *ci)
{
        struct dm_target *ti;
        sector_t len;

        do {
                ti = dm_table_find_target(ci->map, ci->sector);
                if (!dm_target_is_valid(ti))
                        return -EIO;

                /*
                 * Even though the device advertised discard support,
                 * reconfiguration might have changed that since the
                 * check was performed.
                 */
                if (!ti->num_discard_requests)
                        return -EOPNOTSUPP;

                len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

                __issue_target_requests(ci, ti, ti->num_discard_requests, len);

                ci->sector += len;
        } while (ci->sector_count -= len);

        return 0;
}

static int __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti;
        sector_t len = 0, max;
        struct dm_target_io *tio;

        if (unlikely(bio->bi_rw & REQ_DISCARD))
                return __clone_and_map_discard(ci);

        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;

        max = max_io_len(ci->sector, ti);

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                __clone_and_map_simple(ci, ti);

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                tio = alloc_tio(ci, ti);
                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                if (!dm_target_is_valid(ti))
                                        return -EIO;

                                max = max_io_len(ci->sector, ti);
                        }

                        len = min(remaining, max);

                        tio = alloc_tio(ci, ti);
                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }

        return 0;
}
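
/*
 * Worked example for the final branch above (illustrative): suppose a
 * 4-sector bvec starts at sector 6 and the current target ends at
 * sector 8, so max = 2. split_bvec() is called twice: once for sectors
 * 6-7 against the current target, then, after offset has advanced, once
 * for sectors 8-9 against the target looked up afresh at sector 8.
 */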

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;
        int error = 0;

        ci.map = dm_get_live_table(md);
        if (unlikely(!ci.map)) {
                bio_io_error(bio);
                return;
        }

        ci.md = md;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        spin_lock_init(&ci.io->endio_lock);
        ci.sector = bio->bi_sector;
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        if (bio->bi_rw & REQ_FLUSH) {
                ci.bio = &ci.md->flush_bio;
                ci.sector_count = 0;
                error = __clone_and_map_empty_flush(&ci);
                /* dec_pending submits any data associated with flush */
        } else {
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
                while (ci.sector_count && !error)
                        error = __clone_and_map(&ci);
        }

        /* drop the extra reference count */
        dec_pending(ci.io, error);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
                         struct bvec_merge_data *bvm,
                         struct bio_vec *biovec)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table(md);
        struct dm_target *ti;
        sector_t max_sectors;
        int max_size = 0;

        if (unlikely(!map))
                goto out;

        ti = dm_table_find_target(map, bvm->bi_sector);
        if (!dm_target_is_valid(ti))
                goto out_table;

        /*
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
        if (max_size < 0)
                max_size = 0;

        /*
         * merge_bvec_fn() returns number of bytes
         * it can accept at this offset
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
         * provided their merge_bvec method (we know this by looking at
         * queue_max_hw_sectors), then we can't allow bios with multiple vector
         * entries. So always set max_size to 0, and the code below allows
         * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)

                max_size = 0;

out_table:
        dm_table_put(map);

out:
        /*
         * Always allow an entire first page
         */
        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
                max_size = biovec->bv_len;

        return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
}
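
/*
 * Added commentary: dm_request() is the single make_request entry point
 * for both dm types.  Bio-based devices split and map bios right here in
 * the submitter's context; request-based devices hand bios to the block
 * layer's __make_request() so they are merged into requests and mapped
 * later from dm_request_fn().
 */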

void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static void dm_rq_bio_destructor(struct bio *bio)
{
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	if (!info)
		return -ENOMEM;

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->buffer = rq->buffer;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}
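
/*
 * Added commentary: clone_rq() pairs each original request with one
 * dm_rq_target_io whose embedded clone is initialised by setup_clone(),
 * so a single allocation covers both.  The clone's bios are built by
 * blk_rq_prep_clone() and released through dm_rq_bio_destructor(); the
 * tio itself is freed once the clone completes (free_rq_tio()).
 */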

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
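
/*
 * Added commentary: the BLKPREP_* values map onto the block layer's
 * prep_rq_fn contract roughly as follows:
 *
 *	BLKPREP_OK     - clone attached, the request may be started
 *	BLKPREP_DEFER  - transient allocation failure, retried later
 *	BLKPREP_KILL   - malformed request, completed with an error
 *
 * REQ_DONTPREP marks the request as already prepared, so
 * blk_peek_request() does not call dm_prep_fn() again while the request
 * sits on the queue.
 */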

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}
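
/*
 * Added commentary: the DM_MAPIO_* returns from map_rq mirror the
 * bio-based map() contract: SUBMITTED means the target now owns the
 * clone, REMAPPED means the clone points at an underlying device and is
 * dispatched here, and REQUEUE pushes the original request back onto
 * md->queue for a later retry.
 */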

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;
	sector_t pos;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto plug_and_out;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		BUG_ON(!dm_target_is_valid(ti));

		if (ti->type->busy && ti->type->busy(ti))
			goto plug_and_out;

		blk_start_request(rq);
		clone = rq->special;
		atomic_inc(&md->pending[rq_data_dir(clone)]);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))
			goto requeued;

		spin_lock_irq(q->queue_lock);
	}

	goto out;

requeued:
	spin_lock_irq(q->queue_lock);

plug_and_out:
	if (!elv_queue_empty(q))
		/* Some requests still remain, retry later */
		blk_plug_device(q);

out:
	dm_table_put(map);

	return;
}
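
/*
 * Added commentary: the queue lock is dropped around map_request()
 * because dispatching the clone takes the underlying device's queue
 * lock; md->pending[] is incremented first, under this queue's lock, so
 * dm_suspend() still sees the clone as in-flight while the lock is
 * released.
 */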

int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_table_put(map);

	return r;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (map) {
		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
		if (map) {
			/*
			 * Request-based dm only cares about its own
			 * request_queue when queried for congestion status.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);

			dm_table_put(map);
		}
	}

	return r;
}
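
/*
 * Added commentary: bdi_bits is a mask of BDI_*_congested flags.
 * Returning the mask unmodified while DMF_BLOCK_IO_FOR_SUSPEND is set
 * reports every queried state as congested, which throttles callers such
 * as the writeback path until the device is resumed.
 */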

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
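
/*
 * Added commentary: only the queue setup common to bio-based and
 * request-based dm happens here.  The elevator, request_fn and softirq
 * completion needed by the request-based case are added later by
 * dm_init_request_based_queue(), once the table type is known.
 */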

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	md->type = DM_TYPE_NONE;
	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* the md already has the necessary mempools */
		goto out;

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed; the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	unsigned long flags;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	write_lock_irqsave(&md->map_lock, flags);
	old_map = md->map;
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock_irqrestore(&md->map_lock, flags);

	return old_map;
}
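
/*
 * Added commentary: a caller of __bind() is expected to destroy the
 * returned map only after dropping md->suspend_lock, e.g. (sketch):
 *
 *	old_map = __bind(md, t, &limits);
 *	mutex_unlock(&md->suspend_lock);
 *	if (old_map)
 *		dm_table_destroy(old_map);
 *
 * Readers in the I/O path hold their own reference via
 * dm_get_live_table(), so the swap under map_lock never frees a table
 * out from under them.
 */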

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;
	unsigned long flags;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	md->map = NULL;
	write_unlock_irqrestore(&md->map_lock, flags);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	md->saved_make_request_fn = md->queue->make_request_fn;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}
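
/*
 * Added commentary: a typical caller sequence from the table-load ioctl
 * path looks roughly like
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);
 *	...
 *	r = dm_setup_md_queue(md);
 *
 * so the queue is promoted to request-based at most once, before the
 * first table is bound.
 */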

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_put(map);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
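
/*
 * Added commentary: md_in_flight() counts the I/Os accounted in
 * md->pending[], which covers bio-based I/Os as well as request-based
 * clones started in dm_request_fn().  dm_suspend() passes
 * TASK_INTERRUPTIBLE so a stuck suspend can be aborted by a signal,
 * which is the -EINTR path handled by its caller.
 */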

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_read(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		up_read(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, c);

		down_read(&md->io_lock);
	}

	up_read(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r) {
		map = ERR_PTR(r);
		goto out;
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio.  This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock.  To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now.  There is no request-processing activity.  All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
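
/*
 * Added commentary: a minimal table-swap sequence, as driven from the
 * ioctl layer (sketch, error handling omitted):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	dm_resume(md);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 */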

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}
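
/*
 * Added commentary (inferred): MIN_IOS reserves enough objects to
 * guarantee forward progress under memory pressure.  The bio-based
 * bioset is smaller (16) since each original bio needs at most one
 * clone at a time, while a request-based clone may need a clone of
 * every bio in the request at once.
 */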

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");