/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
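
/*
 * Relationship sketch (illustrative): each incoming bio gets one dm_io,
 * and every clone dispatched to a target carries a dm_target_io pointing
 * back at it:
 *
 *	bio --> dm_io (io_count tracks outstanding clones)
 *	         \--> one dm_target_io per clone (ti + per-target map_info)
 *
 * dm_get_mapinfo() below recovers the map_info from a clone's bi_private.
 */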

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
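
/*
 * These are manipulated with the atomic bitops, e.g. (illustrative)
 *
 *	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 *	if (test_bit(DMF_SUSPENDED, &md->flags))
 *		...
 *
 * which lets hot paths such as dm_request() test state without taking
 * md->suspend_lock.
 */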

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
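
/*
 * Illustrative caller pattern (assumed, based on how the flags are used):
 * the device-removal path calls dm_lock_for_deletion() first, so a racing
 * open sees DMF_DELETING in dm_blk_open() and fails with -ENXIO, while an
 * existing opener makes this function return -EBUSY.
 */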

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
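
/*
 * Worked example (illustrative): for 1024 cylinders, 255 heads and
 * 63 sectors/track, sz = 1024 * 255 * 63 = 16450560 sectors (~7.8 GiB
 * with 512-byte sectors), so any geo->start beyond that is rejected.
 */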

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}
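
/*
 * Reference-count lifecycle (illustrative): __split_and_process_bio() starts
 * io_count at 1, __map_bio() takes one reference per clone it dispatches,
 * and every completion drops one here; the submitter's final dec_pending()
 * call releases the initial reference, so the original bio completes only
 * once all of its clones have finished.
 */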

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
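
/*
 * Worked example (illustrative): with ti->split_io = 128 (a power of two,
 * which the mask arithmetic assumes) and offset = 100,
 * boundary = ((100 + 128) & ~127) - 100 = 128 - 100 = 28, so the I/O is
 * clipped at the next 128-sector chunk boundary.
 */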

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
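
/*
 * Recap of the three cases above (illustrative): (1) the rest of the bio
 * fits in one target, so a single clone finishes it; (2) whole bvecs fit,
 * so as many complete bvecs as possible go into one clone; (3) a single
 * bvec straddles a target boundary, so split_bvec() carves it up piecewise.
 */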

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns the number of bytes it can accept
	 * at this offset; max_size is the precomputed maximal io size.
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
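
/*
 * Worked example (illustrative): if max_io_len() returns 64 sectors at this
 * offset and the bio already contains bvm->bi_size = 16384 bytes, then
 * max_size = (64 << SECTOR_SHIFT) - 16384 = 32768 - 16384 = 16384, i.e.
 * 16 KiB may still be added before the bio would need splitting.
 */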

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
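
/*
 * Note (illustrative): both allocators above follow the two-step IDR
 * protocol of this kernel generation: idr_pre_get() preallocates memory
 * outside the spinlock (GFP_KERNEL may sleep), then idr_get_new()/
 * idr_get_new_above() inserts under _minor_lock.  MINOR_ALLOCED is stored
 * as a placeholder until alloc_dev() publishes the real mapped_device via
 * idr_replace().
 */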

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
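
/*
 * Note (illustrative): the bad_* labels above unwind in exactly the reverse
 * order of allocation, the usual kernel goto-cleanup idiom; each failure
 * jumps to the label that releases everything acquired so far and nothing
 * more.
 */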
1203
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001204static void unlock_fs(struct mapped_device *md);
1205
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206static void free_dev(struct mapped_device *md)
1207{
Tejun Heof331c022008-09-03 09:01:48 +02001208 int minor = MINOR(disk_devt(md->disk));
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001209
Jun'ichi Nomurad9dde592006-02-24 13:04:24 -08001210 if (md->suspended_bdev) {
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001211 unlock_fs(md);
Jun'ichi Nomurad9dde592006-02-24 13:04:24 -08001212 bdput(md->suspended_bdev);
1213 }
Milan Broz304f3f62008-02-08 02:11:17 +00001214 destroy_workqueue(md->wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 mempool_destroy(md->tio_pool);
1216 mempool_destroy(md->io_pool);
Stefan Bader9faf4002006-10-03 01:15:41 -07001217 bioset_free(md->bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001218 blk_integrity_unregister(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 del_gendisk(md->disk);
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001220 free_minor(minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001221
1222 spin_lock(&_minor_lock);
1223 md->disk->private_data = NULL;
1224 spin_unlock(&_minor_lock);
1225
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 put_disk(md->disk);
Al Viro1312f402006-03-12 11:02:03 -05001227 blk_cleanup_queue(md->queue);
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001228 module_put(THIS_MODULE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 kfree(md);
1230}
1231
1232/*
1233 * Bind a table to the device.
1234 */
1235static void event_callback(void *context)
1236{
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001237 unsigned long flags;
1238 LIST_HEAD(uevents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 struct mapped_device *md = (struct mapped_device *) context;
1240
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001241 spin_lock_irqsave(&md->uevent_lock, flags);
1242 list_splice_init(&md->uevent_list, &uevents);
1243 spin_unlock_irqrestore(&md->uevent_lock, flags);
1244
Tejun Heoed9e1982008-08-25 19:56:05 +09001245 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001246
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 atomic_inc(&md->event_nr);
1248 wake_up(&md->eventq);
1249}
1250
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07001251static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07001253 set_capacity(md->disk, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08001255 mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08001256 i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08001257 mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258}
1259
1260static int __bind(struct mapped_device *md, struct dm_table *t)
1261{
Jens Axboe165125e2007-07-24 09:28:11 +02001262 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 sector_t size;
1264
1265 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08001266
1267 /*
1268 * Wipe any geometry if the size of the table changed.
1269 */
1270 if (size != get_capacity(md->disk))
1271 memset(&md->geometry, 0, sizeof(md->geometry));
1272
Jun'ichi Nomurabfa152f2007-01-26 00:57:07 -08001273 if (md->suspended_bdev)
1274 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Mikulas Patockad5816872009-01-06 03:05:10 +00001276 if (!size) {
1277 dm_table_destroy(t);
1278 return 0;
1279 }
1280
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07001281 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001282
1283 write_lock(&md->map_lock);
1284 md->map = t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 dm_table_set_restrictions(t, q);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001286 write_unlock(&md->map_lock);
1287
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 return 0;
1289}
1290
1291static void __unbind(struct mapped_device *md)
1292{
1293 struct dm_table *map = md->map;
1294
1295 if (!map)
1296 return;
1297
1298 dm_table_event_callback(map, NULL, NULL);
1299 write_lock(&md->map_lock);
1300 md->map = NULL;
1301 write_unlock(&md->map_lock);
Mikulas Patockad5816872009-01-06 03:05:10 +00001302 dm_table_destroy(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303}
1304
1305/*
1306 * Constructor for a new device.
1307 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001308int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309{
1310 struct mapped_device *md;
1311
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001312 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 if (!md)
1314 return -ENXIO;
1315
Milan Broz784aae72009-01-06 03:05:12 +00001316 dm_sysfs_init(md);
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 *result = md;
1319 return 0;
1320}
1321
David Teigland637842c2006-01-06 00:20:00 -08001322static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323{
1324 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 unsigned minor = MINOR(dev);
1326
1327 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1328 return NULL;
1329
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001330 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
1332 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001333 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02001334 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07001335 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08001336 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001337 goto out;
1338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001340out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001341 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342
David Teigland637842c2006-01-06 00:20:00 -08001343 return md;
1344}
1345
David Teiglandd229a952006-01-06 00:20:01 -08001346struct mapped_device *dm_get_md(dev_t dev)
1347{
1348 struct mapped_device *md = dm_find_md(dev);
1349
1350 if (md)
1351 dm_get(md);
1352
1353 return md;
1354}
1355
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001356void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08001357{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08001358 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359}
1360
1361void dm_set_mdptr(struct mapped_device *md, void *ptr)
1362{
1363 md->interface_ptr = ptr;
1364}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

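/*
 * Wait until md->pending drops to zero, i.e. until every bio already
 * handed to the targets has completed.  The queue is unplugged first so
 * anything sitting in the elevator reaches the targets.  With
 * TASK_INTERRUPTIBLE a signal aborts the wait with -EINTR;
 * TASK_UNINTERRUPTIBLE waits unconditionally.
 */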
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

static int dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	return 0;
}

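/*
 * A barrier is processed by first draining all previously submitted I/O.
 * An empty barrier then completes straight away; one with a payload has
 * its data mapped and the device drained a second time.  A target that
 * returned DM_ENDIO_REQUEUE leaves the bio uncompleted here so it can be
 * retried later.
 */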
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	int error = dm_flush(md);

	if (unlikely(error)) {
		bio_endio(bio, error);
		return;
	}
	if (bio_empty_barrier(bio)) {
		bio_endio(bio, 0);
		return;
	}

	__split_and_process_bio(md, bio);

	error = dm_flush(md);

	if (!error && md->barrier_error)
		error = md->barrier_error;

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, error);
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
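
/*
 * Illustrative sketch: the suspend/swap/resume sequence a caller (the
 * ioctl layer in practice) goes through to replace a device's table.
 * "new_table" is assumed to have been built with dm_table_create() and
 * friends beforehand; the function name is made up.
 */
static int __maybe_unused example_replace_table(struct mapped_device *md,
						struct dm_table *new_table)
{
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	r = dm_swap_table(md, new_table);
	if (r)
		return r;	/* device stays suspended; caller decides */

	return dm_resume(md);
}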

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further I/O gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to the md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
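
/*
 * Illustrative sketch: how a caller blocks until the device reports an
 * event.  Sample the counter first, then sleep until it moves on; the
 * function name is made up.
 */
static int __maybe_unused example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t event_nr = dm_get_event_nr(md);

	/* returns -ERESTARTSYS if interrupted by a signal */
	return dm_wait_event(md, event_nr);
}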

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c,
 * so use this check to verify that kobj is part of the md structure.
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}
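
/*
 * Illustrative sketch of how a sysfs show routine (cf. dm-sysfs.c) can
 * map its kobject back to the owning device; "example_name_show" is a
 * made-up attribute handler.
 */
static ssize_t __maybe_unused example_name_show(struct kobject *kobj, char *buf)
{
	struct mapped_device *md = dm_get_from_kobject(kobj);

	if (!md)
		return -EINVAL;

	sprintf(buf, "%s\n", dm_device_name(md));
	dm_put(md);

	return strlen(buf);
}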

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");