/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

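/*
 * Return the map_info embedded in the dm_target_io that a mapped bio
 * carries in its bi_private field, or NULL if the bio has none.
 */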
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

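/*
 * Placeholder stored in the minor IDR while a minor number is reserved
 * but the mapped_device using it has not yet been published.
 */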
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * State of a single mapped device, including the work processed by its
 * per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

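/*
 * Subsystem init/exit handlers: dm_init() runs _inits[] in order and,
 * on failure, unwinds the ones already run with the matching _exits[].
 */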
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

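/*
 * Per-bio disk statistics: start_io_acct()/end_io_acct() bracket each
 * dm_io and keep the gendisk's in_flight count in step with md->pending.
 */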
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge function; unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

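/*
 * Return how many sectors, starting at the given sector, can be mapped
 * to this target without running off its end or, if the target sets
 * split_io, crossing the next split_io boundary.
 */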
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

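/*
 * Hand a clone over to its target's map function.  The target either
 * submits the clone itself (DM_MAPIO_SUBMITTED), hands it back remapped
 * for us to dispatch (DM_MAPIO_REMAPPED), or reports an error/requeue,
 * in which case the clone is freed here and dec_pending() records the
 * outcome.
 */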
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that does just part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

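/*
 * Clone and map the part of ci->bio that falls within the target at
 * ci->sector.  Three cases: the whole remainder fits in one target,
 * a run of complete bvecs fits, or a single bvec straddles targets
 * and must itself be carved up with split_bvec().
 */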
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

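/*
 * Called by the block layer while a bio is being built: report how many
 * bytes of the proposed biovec can be accepted at this offset without
 * forcing dm_request() to split the resulting bio.
 */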
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

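/*
 * Report backing-device congestion by combining the congestion state of
 * the devices underneath the live table (unless I/O is currently blocked
 * for a suspend).
 */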
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

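/*
 * Allocate the next unused minor number.
 */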
static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

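/*
 * Tear down everything alloc_dev() set up.
 */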
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Called when the table signals an event: deliver any queued uevents
 * and wake processes waiting on the event queue.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

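/*
 * Bind a table to the device.
 */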
1258static int __bind(struct mapped_device *md, struct dm_table *t)
1259{
Jens Axboe165125e2007-07-24 09:28:11 +02001260 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 sector_t size;
1262
1263 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08001264
1265 /*
1266 * Wipe any geometry if the size of the table changed.
1267 */
1268 if (size != get_capacity(md->disk))
1269 memset(&md->geometry, 0, sizeof(md->geometry));
1270
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001271 if (md->suspended_bdev)
1272 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273
Mikulas Patockad5816872009-01-06 03:05:10 +00001274 if (!size) {
1275 dm_table_destroy(t);
1276 return 0;
1277 }
1278
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001279 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001280
1281 write_lock(&md->map_lock);
1282 md->map = t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 dm_table_set_restrictions(t, q);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001284 write_unlock(&md->map_lock);
1285
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 return 0;
1287}
1288
1289static void __unbind(struct mapped_device *md)
1290{
1291 struct dm_table *map = md->map;
1292
1293 if (!map)
1294 return;
1295
1296 dm_table_event_callback(map, NULL, NULL);
1297 write_lock(&md->map_lock);
1298 md->map = NULL;
1299 write_unlock(&md->map_lock);
Mikulas Patockad5816872009-01-06 03:05:10 +00001300 dm_table_destroy(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301}
1302
1303/*
1304 * Constructor for a new device.
1305 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001306int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
1308 struct mapped_device *md;
1309
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001310 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 if (!md)
1312 return -ENXIO;
1313
Milan Broz784aae72009-01-06 03:05:12 +00001314 dm_sysfs_init(md);
1315
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 *result = md;
1317 return 0;
1318}
1319
David Teigland637842c2006-01-06 00:20:00 -08001320static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321{
1322 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 unsigned minor = MINOR(dev);
1324
1325 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1326 return NULL;
1327
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001328 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
1330 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001331 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02001332 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07001333 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08001334 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001335 goto out;
1336 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001338out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001339 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
David Teigland637842c2006-01-06 00:20:00 -08001341 return md;
1342}
1343
David Teiglandd229a952006-01-06 00:20:01 -08001344struct mapped_device *dm_get_md(dev_t dev)
1345{
1346 struct mapped_device *md = dm_find_md(dev);
1347
1348 if (md)
1349 dm_get(md);
1350
1351 return md;
1352}
1353
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001354void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08001355{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001356 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357}
1358
1359void dm_set_mdptr(struct mapped_device *md, void *ptr)
1360{
1361 md->interface_ptr = ptr;
1362}
1363
1364void dm_get(struct mapped_device *md)
1365{
1366 atomic_inc(&md->holders);
1367}
1368
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001369const char *dm_device_name(struct mapped_device *md)
1370{
1371 return md->name;
1372}
1373EXPORT_SYMBOL_GPL(dm_device_name);
1374
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

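/*
 * Wait until md->pending drops to zero, i.e. until all in-flight I/O
 * completes.  TASK_INTERRUPTIBLE waits return -EINTR if a signal
 * arrives first; TASK_UNINTERRUPTIBLE waits always return 0.
 */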
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

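/* Drain: wait uninterruptibly until every in-flight bio completes. */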
static int dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	return 0;
}

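/*
 * Barriers are processed one at a time from the deferred list: drain
 * everything in flight, submit the barrier bio itself (unless it is an
 * empty barrier), drain again, then complete the bio with any error the
 * targets reported, unless it is being requeued (DM_ENDIO_REQUEUE).
 */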
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	int error = dm_flush(md);

	if (unlikely(error)) {
		bio_endio(bio, error);
		return;
	}
	if (bio_empty_barrier(bio)) {
		bio_endio(bio, 0);
		return;
	}

	__split_and_process_bio(md, bio);

	error = dm_flush(md);

	if (!error && md->barrier_error)
		error = md->barrier_error;

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, error);
}

/*
 * Process the deferred bios.  Runs from md->wq; md->io_lock is dropped
 * around each bio so that new I/O can still be deferred meanwhile.
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

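/*
 * Clear DMF_BLOCK_IO_FOR_SUSPEND and kick the worker so the deferred
 * bios are replayed; the barrier makes the cleared flag visible before
 * the work item can run.
 */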
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

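/*
 * A hedged usage sketch (new_table is illustrative, not taken from this
 * file): a table-reload path would typically do
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	r = dm_swap_table(md, new_table);
 *	dm_resume(md);
 *
 * because dm_swap_table() below refuses to run on an active device.
 */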
/*
 * Swap in a new table, destroying the old one.
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/*
	 * Don't bdput right now; we don't want the bdev to go away
	 * while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in the
 * background.  Before the table can be swapped with dm_swap_table,
 * dm_suspend must be called to flush any in-flight bios and ensure
 * that any further I/O gets deferred.
 */
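/*
 * suspend_flags (decoded at the top of dm_suspend):
 *   DM_SUSPEND_LOCKFS_FLAG  - freeze any filesystem mounted on the
 *			       device so its dirty data is flushed first.
 *   DM_SUSPEND_NOFLUSH_FLAG - skip the flush; targets can detect this
 *			       via dm_noflush_suspending() and requeue I/O.
 */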
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. that no one may be executing
	 * __split_and_process_bio.  It is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock.  To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq).  flush_workqueue will wait until
	 * dm_wq_work exits, and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request
	 * routines.  We call dm_wait_for_completion to wait for all
	 * existing requests to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now.  There is no request-processing activity.  All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

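/*
 * Reverse of dm_suspend: resume the targets, replay the deferred bios,
 * thaw any frozen filesystem, drop the suspended bdev and notify
 * userspace with a change uevent.
 */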
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

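/*
 * Monotonically increasing cookie used to order uevents; each event is
 * presumably tagged with the next value by the uevent code.
 */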
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

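/*
 * A hedged usage sketch (illustrative, not lifted from a real caller):
 * sample the event counter, then block until it changes:
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *
 * wait_event_interruptible() returns -ERESTARTSYS when a signal breaks
 * the wait and 0 once event_nr differs from the stored value.
 */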
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c, so use
 * this check to verify that kobj is part of the md structure.  On
 * success a reference is taken; release it with dm_put().
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

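/*
 * Block-layer entry points for dm device nodes; the open, close, ioctl
 * and geometry handlers are defined earlier in this file.
 */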
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");