blob: a503b95ecbfb515e863e7539a5f516bbef3e9fe6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
Milan Broz784aae72009-01-06 03:05:12 +00003 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"
Mike Anderson51e5b2b2007-10-19 22:48:00 +01009#include "dm-uevent.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070010
11#include <linux/init.h>
12#include <linux/module.h>
Arjan van de Ven48c9c272006-03-27 01:18:20 -080013#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/moduleparam.h>
15#include <linux/blkpg.h>
16#include <linux/bio.h>
17#include <linux/buffer_head.h>
Arnd Bergmann6e9624b2010-08-07 18:25:34 +020018#include <linux/smp_lock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/mempool.h>
20#include <linux/slab.h>
21#include <linux/idr.h>
Darrick J. Wong3ac51e72006-03-27 01:17:54 -080022#include <linux/hdreg.h>
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +010023#include <linux/delay.h>
Li Zefan55782132009-06-09 13:43:05 +080024
25#include <trace/events/block.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
Alasdair G Kergon72d94862006-06-26 00:27:35 -070027#define DM_MSG_PREFIX "core"
28
Milan Broz60935eb2009-06-22 10:12:30 +010029/*
30 * Cookies are numeric values sent with CHANGE and REMOVE
31 * uevents while resuming, removing or renaming the device.
32 */
33#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
34#define DM_COOKIE_LENGTH 24
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036static const char *_name = DM_NAME;
37
38static unsigned int major = 0;
39static unsigned int _major = 0;
40
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -070041static DEFINE_SPINLOCK(_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/*
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +000043 * For bio-based dm.
Linus Torvalds1da177e2005-04-16 15:20:36 -070044 * One of these is allocated per bio.
45 */
46struct dm_io {
47 struct mapped_device *md;
48 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 atomic_t io_count;
Richard Kennedy6ae2fa62008-07-21 12:00:28 +010050 struct bio *bio;
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -080051 unsigned long start_time;
Kiyoshi Uedaf88fb982009-10-16 23:18:15 +010052 spinlock_t endio_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -070053};
54
55/*
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +000056 * For bio-based dm.
Linus Torvalds1da177e2005-04-16 15:20:36 -070057 * One of these is allocated per target within a bio. Hopefully
58 * this will be simplified out one day.
59 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +010060struct dm_target_io {
Linus Torvalds1da177e2005-04-16 15:20:36 -070061 struct dm_io *io;
62 struct dm_target *ti;
63 union map_info info;
64};
65
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +000066/*
67 * For request-based dm.
68 * One of these is allocated per request.
69 */
70struct dm_rq_target_io {
71 struct mapped_device *md;
72 struct dm_target *ti;
73 struct request *orig, clone;
74 int error;
75 union map_info info;
76};
77
78/*
79 * For request-based dm.
80 * One of these is allocated per bio.
81 */
82struct dm_rq_clone_bio_info {
83 struct bio *orig;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +010084 struct dm_rq_target_io *tio;
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +000085};
86
Linus Torvalds1da177e2005-04-16 15:20:36 -070087union map_info *dm_get_mapinfo(struct bio *bio)
88{
Alasdair G Kergon17b2f662006-06-26 00:27:33 -070089 if (bio && bio->bi_private)
Alasdair G Kergon028867a2007-07-12 17:26:32 +010090 return &((struct dm_target_io *)bio->bi_private)->info;
Alasdair G Kergon17b2f662006-06-26 00:27:33 -070091 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
Kiyoshi Uedacec47e32009-06-22 10:12:35 +010094union map_info *dm_get_rq_mapinfo(struct request *rq)
95{
96 if (rq && rq->end_io_data)
97 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
98 return NULL;
99}
100EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
101
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -0700102#define MINOR_ALLOCED ((void *)-1)
103
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104/*
105 * Bits for the md->flags field.
106 */
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +0100107#define DMF_BLOCK_IO_FOR_SUSPEND 0
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#define DMF_SUSPENDED 1
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -0800109#define DMF_FROZEN 2
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700110#define DMF_FREEING 3
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700111#define DMF_DELETING 4
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800112#define DMF_NOFLUSH_SUSPENDING 5
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +0100113#define DMF_QUEUE_IO_TO_THREAD 6
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114
Milan Broz304f3f62008-02-08 02:11:17 +0000115/*
116 * Work processed by per-device workqueue.
117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118struct mapped_device {
Alasdair G Kergon2ca33102005-07-28 21:16:00 -0700119 struct rw_semaphore io_lock;
Daniel Walkere61290a2008-02-08 02:10:08 +0000120 struct mutex suspend_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 rwlock_t map_lock;
122 atomic_t holders;
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700123 atomic_t open_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124
125 unsigned long flags;
126
Jens Axboe165125e2007-07-24 09:28:11 +0200127 struct request_queue *queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 struct gendisk *disk;
Mike Anderson7e51f252006-03-27 01:17:52 -0800129 char name[16];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
131 void *interface_ptr;
132
133 /*
134 * A list of ios that arrived while we were suspended.
135 */
Nikanth Karthikesan316d3152009-10-06 20:16:55 +0200136 atomic_t pending[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 wait_queue_head_t wait;
Mikulas Patocka53d59142009-04-02 19:55:37 +0100138 struct work_struct work;
Kiyoshi Ueda74859362006-12-08 02:41:02 -0800139 struct bio_list deferred;
Mikulas Patocka022c2612009-04-02 19:55:39 +0100140 spinlock_t deferred_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
142 /*
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100143 * An error from the barrier request currently being processed.
144 */
145 int barrier_error;
146
147 /*
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000148 * Protect barrier_error from concurrent endio processing
149 * in request-based dm.
150 */
151 spinlock_t barrier_error_lock;
152
153 /*
Milan Broz304f3f62008-02-08 02:11:17 +0000154 * Processing queue (flush/barriers)
155 */
156 struct workqueue_struct *wq;
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000157 struct work_struct barrier_work;
158
159 /* A pointer to the currently processing pre/post flush request */
160 struct request *flush_request;
Milan Broz304f3f62008-02-08 02:11:17 +0000161
162 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 * The current mapping.
164 */
165 struct dm_table *map;
166
167 /*
168 * io objects are allocated from here.
169 */
170 mempool_t *io_pool;
171 mempool_t *tio_pool;
172
Stefan Bader9faf4002006-10-03 01:15:41 -0700173 struct bio_set *bs;
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 /*
176 * Event handling.
177 */
178 atomic_t event_nr;
179 wait_queue_head_t eventq;
Mike Anderson7a8c3d32007-10-19 22:48:01 +0100180 atomic_t uevent_seq;
181 struct list_head uevent_list;
182 spinlock_t uevent_lock; /* Protect access to uevent_list */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
184 /*
185 * freeze/thaw support require holding onto a super block
186 */
187 struct super_block *frozen_sb;
Mikulas Patockadb8fef42009-06-22 10:12:15 +0100188 struct block_device *bdev;
Darrick J. Wong3ac51e72006-03-27 01:17:54 -0800189
190 /* forced geometry settings */
191 struct hd_geometry geometry;
Milan Broz784aae72009-01-06 03:05:12 +0000192
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100193 /* For saving the address of __make_request for request based dm */
194 make_request_fn *saved_make_request_fn;
195
Milan Broz784aae72009-01-06 03:05:12 +0000196 /* sysfs handle */
197 struct kobject kobj;
Mikulas Patocka52b1fd52009-06-22 10:12:21 +0100198
199 /* zero-length barrier that will be cloned and submitted to targets */
200 struct bio barrier_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201};
202
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +0100203/*
204 * For mempools pre-allocation at the table loading time.
205 */
206struct dm_md_mempools {
207 mempool_t *io_pool;
208 mempool_t *tio_pool;
209 struct bio_set *bs;
210};
211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212#define MIN_IOS 256
Christoph Lametere18b8902006-12-06 20:33:20 -0800213static struct kmem_cache *_io_cache;
214static struct kmem_cache *_tio_cache;
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +0000215static struct kmem_cache *_rq_tio_cache;
216static struct kmem_cache *_rq_bio_info_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218static int __init local_init(void)
219{
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100220 int r = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222 /* allocate a slab for the dm_ios */
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100223 _io_cache = KMEM_CACHE(dm_io, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 if (!_io_cache)
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100225 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
227 /* allocate a slab for the target ios */
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100228 _tio_cache = KMEM_CACHE(dm_target_io, 0);
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100229 if (!_tio_cache)
230 goto out_free_io_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +0000232 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
233 if (!_rq_tio_cache)
234 goto out_free_tio_cache;
235
236 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
237 if (!_rq_bio_info_cache)
238 goto out_free_rq_tio_cache;
239
Mike Anderson51e5b2b2007-10-19 22:48:00 +0100240 r = dm_uevent_init();
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100241 if (r)
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +0000242 goto out_free_rq_bio_info_cache;
Mike Anderson51e5b2b2007-10-19 22:48:00 +0100243
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 _major = major;
245 r = register_blkdev(_major, _name);
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100246 if (r < 0)
247 goto out_uevent_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248
249 if (!_major)
250 _major = r;
251
252 return 0;
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100253
254out_uevent_exit:
255 dm_uevent_exit();
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +0000256out_free_rq_bio_info_cache:
257 kmem_cache_destroy(_rq_bio_info_cache);
258out_free_rq_tio_cache:
259 kmem_cache_destroy(_rq_tio_cache);
Kiyoshi Ueda51157b42008-10-21 17:45:08 +0100260out_free_tio_cache:
261 kmem_cache_destroy(_tio_cache);
262out_free_io_cache:
263 kmem_cache_destroy(_io_cache);
264
265 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266}
267
268static void local_exit(void)
269{
Kiyoshi Ueda8fbf26a2009-01-06 03:05:06 +0000270 kmem_cache_destroy(_rq_bio_info_cache);
271 kmem_cache_destroy(_rq_tio_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 kmem_cache_destroy(_tio_cache);
273 kmem_cache_destroy(_io_cache);
Akinobu Mita00d59402007-07-17 04:03:46 -0700274 unregister_blkdev(_major, _name);
Mike Anderson51e5b2b2007-10-19 22:48:00 +0100275 dm_uevent_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276
277 _major = 0;
278
279 DMINFO("cleaned up");
280}
281
Alasdair G Kergonb9249e52008-02-08 02:09:51 +0000282static int (*_inits[])(void) __initdata = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 local_init,
284 dm_target_init,
285 dm_linear_init,
286 dm_stripe_init,
Mikulas Patocka952b3552009-12-10 23:51:57 +0000287 dm_io_init,
Mikulas Patocka945fa4d2008-04-24 21:43:49 +0100288 dm_kcopyd_init,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 dm_interface_init,
290};
291
Alasdair G Kergonb9249e52008-02-08 02:09:51 +0000292static void (*_exits[])(void) = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 local_exit,
294 dm_target_exit,
295 dm_linear_exit,
296 dm_stripe_exit,
Mikulas Patocka952b3552009-12-10 23:51:57 +0000297 dm_io_exit,
Mikulas Patocka945fa4d2008-04-24 21:43:49 +0100298 dm_kcopyd_exit,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 dm_interface_exit,
300};
301
302static int __init dm_init(void)
303{
304 const int count = ARRAY_SIZE(_inits);
305
306 int r, i;
307
308 for (i = 0; i < count; i++) {
309 r = _inits[i]();
310 if (r)
311 goto bad;
312 }
313
314 return 0;
315
316 bad:
317 while (i--)
318 _exits[i]();
319
320 return r;
321}
322
323static void __exit dm_exit(void)
324{
325 int i = ARRAY_SIZE(_exits);
326
327 while (i--)
328 _exits[i]();
329}
330
331/*
332 * Block device functions
333 */
Mike Anderson432a2122009-12-10 23:52:20 +0000334int dm_deleting_md(struct mapped_device *md)
335{
336 return test_bit(DMF_DELETING, &md->flags);
337}
338
Al Virofe5f9f22008-03-02 10:29:31 -0500339static int dm_blk_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340{
341 struct mapped_device *md;
342
Arnd Bergmann6e9624b2010-08-07 18:25:34 +0200343 lock_kernel();
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700344 spin_lock(&_minor_lock);
345
Al Virofe5f9f22008-03-02 10:29:31 -0500346 md = bdev->bd_disk->private_data;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700347 if (!md)
348 goto out;
349
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700350 if (test_bit(DMF_FREEING, &md->flags) ||
Mike Anderson432a2122009-12-10 23:52:20 +0000351 dm_deleting_md(md)) {
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700352 md = NULL;
353 goto out;
354 }
355
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 dm_get(md);
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700357 atomic_inc(&md->open_count);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700358
359out:
360 spin_unlock(&_minor_lock);
Arnd Bergmann6e9624b2010-08-07 18:25:34 +0200361 unlock_kernel();
Jeff Mahoneyfba9f902006-06-26 00:27:23 -0700362
363 return md ? 0 : -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364}
365
Al Virofe5f9f22008-03-02 10:29:31 -0500366static int dm_blk_close(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367{
Al Virofe5f9f22008-03-02 10:29:31 -0500368 struct mapped_device *md = disk->private_data;
Arnd Bergmann6e9624b2010-08-07 18:25:34 +0200369
370 lock_kernel();
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700371 atomic_dec(&md->open_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372 dm_put(md);
Arnd Bergmann6e9624b2010-08-07 18:25:34 +0200373 unlock_kernel();
374
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 return 0;
376}
377
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -0700378int dm_open_count(struct mapped_device *md)
379{
380 return atomic_read(&md->open_count);
381}
382
383/*
384 * Guarantees nothing is using the device before it's deleted.
385 */
386int dm_lock_for_deletion(struct mapped_device *md)
387{
388 int r = 0;
389
390 spin_lock(&_minor_lock);
391
392 if (dm_open_count(md))
393 r = -EBUSY;
394 else
395 set_bit(DMF_DELETING, &md->flags);
396
397 spin_unlock(&_minor_lock);
398
399 return r;
400}
401
Darrick J. Wong3ac51e72006-03-27 01:17:54 -0800402static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
403{
404 struct mapped_device *md = bdev->bd_disk->private_data;
405
406 return dm_get_geometry(md, geo);
407}
408
Al Virofe5f9f22008-03-02 10:29:31 -0500409static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
Milan Brozaa129a22006-10-03 01:15:15 -0700410 unsigned int cmd, unsigned long arg)
411{
Al Virofe5f9f22008-03-02 10:29:31 -0500412 struct mapped_device *md = bdev->bd_disk->private_data;
Alasdair G Kergon7c666412009-12-10 23:52:19 +0000413 struct dm_table *map = dm_get_live_table(md);
Milan Brozaa129a22006-10-03 01:15:15 -0700414 struct dm_target *tgt;
415 int r = -ENOTTY;
416
Milan Brozaa129a22006-10-03 01:15:15 -0700417 if (!map || !dm_table_get_size(map))
418 goto out;
419
420 /* We only support devices that have a single target */
421 if (dm_table_get_num_targets(map) != 1)
422 goto out;
423
424 tgt = dm_table_get_target(map, 0);
425
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +0000426 if (dm_suspended_md(md)) {
Milan Brozaa129a22006-10-03 01:15:15 -0700427 r = -EAGAIN;
428 goto out;
429 }
430
431 if (tgt->type->ioctl)
Al Viro647b3d02007-08-28 22:15:59 -0400432 r = tgt->type->ioctl(tgt, cmd, arg);
Milan Brozaa129a22006-10-03 01:15:15 -0700433
434out:
435 dm_table_put(map);
436
Milan Brozaa129a22006-10-03 01:15:15 -0700437 return r;
438}
439
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100440static struct dm_io *alloc_io(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441{
442 return mempool_alloc(md->io_pool, GFP_NOIO);
443}
444
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100445static void free_io(struct mapped_device *md, struct dm_io *io)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446{
447 mempool_free(io, md->io_pool);
448}
449
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100450static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451{
452 mempool_free(tio, md->tio_pool);
453}
454
Kiyoshi Ueda08885642009-12-10 23:52:15 +0000455static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
456 gfp_t gfp_mask)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100457{
Kiyoshi Ueda08885642009-12-10 23:52:15 +0000458 return mempool_alloc(md->tio_pool, gfp_mask);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100459}
460
461static void free_rq_tio(struct dm_rq_target_io *tio)
462{
463 mempool_free(tio, tio->md->tio_pool);
464}
465
466static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
467{
468 return mempool_alloc(md->io_pool, GFP_ATOMIC);
469}
470
471static void free_bio_info(struct dm_rq_clone_bio_info *info)
472{
473 mempool_free(info, info->tio->md->io_pool);
474}
475
Kiyoshi Ueda90abb8c2009-12-10 23:52:13 +0000476static int md_in_flight(struct mapped_device *md)
477{
478 return atomic_read(&md->pending[READ]) +
479 atomic_read(&md->pending[WRITE]);
480}
481
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800482static void start_io_acct(struct dm_io *io)
483{
484 struct mapped_device *md = io->md;
Tejun Heoc9959052008-08-25 19:47:21 +0900485 int cpu;
Nikanth Karthikesan316d3152009-10-06 20:16:55 +0200486 int rw = bio_data_dir(io->bio);
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800487
488 io->start_time = jiffies;
489
Tejun Heo074a7ac2008-08-25 19:56:14 +0900490 cpu = part_stat_lock();
491 part_round_stats(cpu, &dm_disk(md)->part0);
492 part_stat_unlock();
Nikanth Karthikesan316d3152009-10-06 20:16:55 +0200493 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800494}
495
Mikulas Patockad221d2e2008-11-13 23:39:10 +0000496static void end_io_acct(struct dm_io *io)
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800497{
498 struct mapped_device *md = io->md;
499 struct bio *bio = io->bio;
500 unsigned long duration = jiffies - io->start_time;
Tejun Heoc9959052008-08-25 19:47:21 +0900501 int pending, cpu;
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800502 int rw = bio_data_dir(bio);
503
Tejun Heo074a7ac2008-08-25 19:56:14 +0900504 cpu = part_stat_lock();
505 part_round_stats(cpu, &dm_disk(md)->part0);
506 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
507 part_stat_unlock();
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800508
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100509 /*
510 * After this is decremented the bio must not be touched if it is
511 * a barrier.
512 */
Nikanth Karthikesan316d3152009-10-06 20:16:55 +0200513 dm_disk(md)->part0.in_flight[rw] = pending =
514 atomic_dec_return(&md->pending[rw]);
515 pending += atomic_read(&md->pending[rw^0x1]);
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800516
Mikulas Patockad221d2e2008-11-13 23:39:10 +0000517 /* nudge anyone waiting on suspend queue */
518 if (!pending)
519 wake_up(&md->wait);
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -0800520}
521
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522/*
523 * Add the bio to the list of deferred io.
524 */
Mikulas Patocka92c63902009-04-09 00:27:15 +0100525static void queue_io(struct mapped_device *md, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526{
Alasdair G Kergon2ca33102005-07-28 21:16:00 -0700527 down_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
Mikulas Patocka022c2612009-04-02 19:55:39 +0100529 spin_lock_irq(&md->deferred_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 bio_list_add(&md->deferred, bio);
Mikulas Patocka022c2612009-04-02 19:55:39 +0100531 spin_unlock_irq(&md->deferred_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532
Mikulas Patocka92c63902009-04-09 00:27:15 +0100533 if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
534 queue_work(md->wq, &md->work);
535
Alasdair G Kergon2ca33102005-07-28 21:16:00 -0700536 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537}
538
539/*
540 * Everyone (including functions in this file), should use this
541 * function to access the md->map field, and make sure they call
542 * dm_table_put() when finished.
543 */
Alasdair G Kergon7c666412009-12-10 23:52:19 +0000544struct dm_table *dm_get_live_table(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545{
546 struct dm_table *t;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +0100547 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548
Kiyoshi Ueda523d9292009-06-22 10:12:37 +0100549 read_lock_irqsave(&md->map_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 t = md->map;
551 if (t)
552 dm_table_get(t);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +0100553 read_unlock_irqrestore(&md->map_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554
555 return t;
556}
557
Darrick J. Wong3ac51e72006-03-27 01:17:54 -0800558/*
559 * Get the geometry associated with a dm device
560 */
561int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
562{
563 *geo = md->geometry;
564
565 return 0;
566}
567
568/*
569 * Set the geometry of a device.
570 */
571int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
572{
573 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
574
575 if (geo->start > sz) {
576 DMWARN("Start sector is beyond the geometry limits.");
577 return -EINVAL;
578 }
579
580 md->geometry = *geo;
581
582 return 0;
583}
584
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585/*-----------------------------------------------------------------
586 * CRUD START:
587 * A more elegant soln is in the works that uses the queue
588 * merge fn, unfortunately there are a couple of changes to
589 * the block layer that I want to make for this. So in the
590 * interests of getting something for people to use I give
591 * you this clearly demarcated crap.
592 *---------------------------------------------------------------*/
593
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800594static int __noflush_suspending(struct mapped_device *md)
595{
596 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
597}
598
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599/*
600 * Decrements the number of outstanding ios that a bio has been
601 * cloned into, completing the original io if necc.
602 */
Arjan van de Ven858119e2006-01-14 13:20:43 -0800603static void dec_pending(struct dm_io *io, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604{
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800605 unsigned long flags;
Milan Brozb35f8ca2009-03-16 17:44:36 +0000606 int io_error;
607 struct bio *bio;
608 struct mapped_device *md = io->md;
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800609
610 /* Push-back supersedes any I/O errors */
Kiyoshi Uedaf88fb982009-10-16 23:18:15 +0100611 if (unlikely(error)) {
612 spin_lock_irqsave(&io->endio_lock, flags);
613 if (!(io->error > 0 && __noflush_suspending(md)))
614 io->error = error;
615 spin_unlock_irqrestore(&io->endio_lock, flags);
616 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
618 if (atomic_dec_and_test(&io->io_count)) {
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800619 if (io->error == DM_ENDIO_REQUEUE) {
620 /*
621 * Target requested pushing back the I/O.
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800622 */
Mikulas Patocka022c2612009-04-02 19:55:39 +0100623 spin_lock_irqsave(&md->deferred_lock, flags);
Mikulas Patocka2761e952009-06-22 10:12:18 +0100624 if (__noflush_suspending(md)) {
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +0200625 if (!(io->bio->bi_rw & REQ_HARDBARRIER))
Mikulas Patocka2761e952009-06-22 10:12:18 +0100626 bio_list_add_head(&md->deferred,
627 io->bio);
628 } else
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800629 /* noflush suspend was interrupted. */
630 io->error = -EIO;
Mikulas Patocka022c2612009-04-02 19:55:39 +0100631 spin_unlock_irqrestore(&md->deferred_lock, flags);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800632 }
633
Milan Brozb35f8ca2009-03-16 17:44:36 +0000634 io_error = io->error;
635 bio = io->bio;
Jens Axboe2056a782006-03-23 20:00:26 +0100636
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +0200637 if (bio->bi_rw & REQ_HARDBARRIER) {
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100638 /*
639 * There can be just one barrier request so we use
640 * a per-device variable for error reporting.
641 * Note that you can't touch the bio after end_io_acct
642 */
Mikulas Patockafdb95722009-06-22 10:12:19 +0100643 if (!md->barrier_error && io_error != -EOPNOTSUPP)
Mikulas Patocka5aa27812009-06-22 10:12:18 +0100644 md->barrier_error = io_error;
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100645 end_io_acct(io);
Mikulas Patockaa97f9252010-03-06 02:32:29 +0000646 free_io(md, io);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100647 } else {
648 end_io_acct(io);
Mikulas Patockaa97f9252010-03-06 02:32:29 +0000649 free_io(md, io);
Milan Brozb35f8ca2009-03-16 17:44:36 +0000650
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100651 if (io_error != DM_ENDIO_REQUEUE) {
652 trace_block_bio_complete(md->queue, bio);
Milan Brozb35f8ca2009-03-16 17:44:36 +0000653
Mikulas Patockaaf7e4662009-04-09 00:27:16 +0100654 bio_endio(bio, io_error);
655 }
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800656 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 }
658}
659
NeilBrown6712ecf2007-09-27 12:47:43 +0200660static void clone_endio(struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661{
662 int r = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100663 struct dm_target_io *tio = bio->bi_private;
Milan Brozb35f8ca2009-03-16 17:44:36 +0000664 struct dm_io *io = tio->io;
Stefan Bader9faf4002006-10-03 01:15:41 -0700665 struct mapped_device *md = tio->io->md;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 dm_endio_fn endio = tio->ti->type->end_io;
667
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
669 error = -EIO;
670
671 if (endio) {
672 r = endio(tio->ti, bio, error, &tio->info);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -0800673 if (r < 0 || r == DM_ENDIO_REQUEUE)
674 /*
675 * error and requeue request are handled
676 * in dec_pending().
677 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 error = r;
Kiyoshi Ueda45cbcd72006-12-08 02:41:05 -0800679 else if (r == DM_ENDIO_INCOMPLETE)
680 /* The target will handle the io */
NeilBrown6712ecf2007-09-27 12:47:43 +0200681 return;
Kiyoshi Ueda45cbcd72006-12-08 02:41:05 -0800682 else if (r) {
683 DMWARN("unimplemented target endio return value: %d", r);
684 BUG();
685 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 }
687
Stefan Bader9faf4002006-10-03 01:15:41 -0700688 /*
689 * Store md for cleanup instead of tio which is about to get freed.
690 */
691 bio->bi_private = md->bs;
692
Stefan Bader9faf4002006-10-03 01:15:41 -0700693 free_tio(md, tio);
Milan Brozb35f8ca2009-03-16 17:44:36 +0000694 bio_put(bio);
695 dec_pending(io, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696}
697
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100698/*
699 * Partial completion handling for request-based dm
700 */
701static void end_clone_bio(struct bio *clone, int error)
702{
703 struct dm_rq_clone_bio_info *info = clone->bi_private;
704 struct dm_rq_target_io *tio = info->tio;
705 struct bio *bio = info->orig;
706 unsigned int nr_bytes = info->orig->bi_size;
707
708 bio_put(clone);
709
710 if (tio->error)
711 /*
712 * An error has already been detected on the request.
713 * Once error occurred, just let clone->end_io() handle
714 * the remainder.
715 */
716 return;
717 else if (error) {
718 /*
719 * Don't notice the error to the upper layer yet.
720 * The error handling decision is made by the target driver,
721 * when the request is completed.
722 */
723 tio->error = error;
724 return;
725 }
726
727 /*
728 * I/O for the bio successfully completed.
729 * Notice the data completion to the upper layer.
730 */
731
732 /*
733 * bios are processed from the head of the list.
734 * So the completing bio should always be rq->bio.
735 * If it's not, something wrong is happening.
736 */
737 if (tio->orig->bio != bio)
738 DMERR("bio completion is going in the middle of the request");
739
740 /*
741 * Update the original request.
742 * Do not use blk_end_request() here, because it may complete
743 * the original request before the clone, and break the ordering.
744 */
745 blk_update_request(tio->orig, 0, nr_bytes);
746}
747
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000748static void store_barrier_error(struct mapped_device *md, int error)
749{
750 unsigned long flags;
751
752 spin_lock_irqsave(&md->barrier_error_lock, flags);
753 /*
754 * Basically, the first error is taken, but:
755 * -EOPNOTSUPP supersedes any I/O error.
756 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
757 */
758 if (!md->barrier_error || error == -EOPNOTSUPP ||
759 (md->barrier_error != -EOPNOTSUPP &&
760 error == DM_ENDIO_REQUEUE))
761 md->barrier_error = error;
762 spin_unlock_irqrestore(&md->barrier_error_lock, flags);
763}
764
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100765/*
766 * Don't touch any member of the md after calling this function because
767 * the md may be freed in dm_put() at the end of this function.
768 * Or do dm_get() before calling this function and dm_put() later.
769 */
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000770static void rq_completed(struct mapped_device *md, int rw, int run_queue)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100771{
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000772 atomic_dec(&md->pending[rw]);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100773
774 /* nudge anyone waiting on suspend queue */
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000775 if (!md_in_flight(md))
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100776 wake_up(&md->wait);
777
778 if (run_queue)
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000779 blk_run_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100780
781 /*
782 * dm_put() must be at the end of this function. See the comment above
783 */
784 dm_put(md);
785}
786
Kiyoshi Uedaa77e28c2009-09-04 20:40:16 +0100787static void free_rq_clone(struct request *clone)
788{
789 struct dm_rq_target_io *tio = clone->end_io_data;
790
791 blk_rq_unprep_clone(clone);
792 free_rq_tio(tio);
793}
794
Kiyoshi Ueda980691e2009-12-10 23:52:17 +0000795/*
796 * Complete the clone and the original request.
797 * Must be called without queue lock.
798 */
799static void dm_end_request(struct request *clone, int error)
800{
801 int rw = rq_data_dir(clone);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000802 int run_queue = 1;
Christoph Hellwig33659eb2010-08-07 18:17:56 +0200803 bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
Kiyoshi Ueda980691e2009-12-10 23:52:17 +0000804 struct dm_rq_target_io *tio = clone->end_io_data;
805 struct mapped_device *md = tio->md;
806 struct request *rq = tio->orig;
807
Christoph Hellwig33659eb2010-08-07 18:17:56 +0200808 if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
Kiyoshi Ueda980691e2009-12-10 23:52:17 +0000809 rq->errors = clone->errors;
810 rq->resid_len = clone->resid_len;
811
812 if (rq->sense)
813 /*
814 * We are using the sense buffer of the original
815 * request.
816 * So setting the length of the sense data is enough.
817 */
818 rq->sense_len = clone->sense_len;
819 }
820
821 free_rq_clone(clone);
822
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000823 if (unlikely(is_barrier)) {
824 if (unlikely(error))
825 store_barrier_error(md, error);
826 run_queue = 0;
827 } else
828 blk_end_request_all(rq, error);
Kiyoshi Ueda980691e2009-12-10 23:52:17 +0000829
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000830 rq_completed(md, rw, run_queue);
Kiyoshi Ueda980691e2009-12-10 23:52:17 +0000831}
832
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100833static void dm_unprep_request(struct request *rq)
834{
835 struct request *clone = rq->special;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100836
837 rq->special = NULL;
838 rq->cmd_flags &= ~REQ_DONTPREP;
839
Kiyoshi Uedaa77e28c2009-09-04 20:40:16 +0100840 free_rq_clone(clone);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100841}
842
843/*
844 * Requeue the original request of a clone.
845 */
846void dm_requeue_unmapped_request(struct request *clone)
847{
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000848 int rw = rq_data_dir(clone);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100849 struct dm_rq_target_io *tio = clone->end_io_data;
850 struct mapped_device *md = tio->md;
851 struct request *rq = tio->orig;
852 struct request_queue *q = rq->q;
853 unsigned long flags;
854
Christoph Hellwig33659eb2010-08-07 18:17:56 +0200855 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000856 /*
857 * Barrier clones share an original request.
858 * Leave it to dm_end_request(), which handles this special
859 * case.
860 */
861 dm_end_request(clone, DM_ENDIO_REQUEUE);
862 return;
863 }
864
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100865 dm_unprep_request(rq);
866
867 spin_lock_irqsave(q->queue_lock, flags);
868 if (elv_queue_empty(q))
869 blk_plug_device(q);
870 blk_requeue_request(q, rq);
871 spin_unlock_irqrestore(q->queue_lock, flags);
872
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +0000873 rq_completed(md, rw, 0);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100874}
875EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
876
877static void __stop_queue(struct request_queue *q)
878{
879 blk_stop_queue(q);
880}
881
882static void stop_queue(struct request_queue *q)
883{
884 unsigned long flags;
885
886 spin_lock_irqsave(q->queue_lock, flags);
887 __stop_queue(q);
888 spin_unlock_irqrestore(q->queue_lock, flags);
889}
890
891static void __start_queue(struct request_queue *q)
892{
893 if (blk_queue_stopped(q))
894 blk_start_queue(q);
895}
896
897static void start_queue(struct request_queue *q)
898{
899 unsigned long flags;
900
901 spin_lock_irqsave(q->queue_lock, flags);
902 __start_queue(q);
903 spin_unlock_irqrestore(q->queue_lock, flags);
904}
905
Kiyoshi Ueda11a68242009-12-10 23:52:17 +0000906static void dm_done(struct request *clone, int error, bool mapped)
907{
908 int r = error;
909 struct dm_rq_target_io *tio = clone->end_io_data;
910 dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
911
912 if (mapped && rq_end_io)
913 r = rq_end_io(tio->ti, clone, error, &tio->info);
914
915 if (r <= 0)
916 /* The target wants to complete the I/O */
917 dm_end_request(clone, r);
918 else if (r == DM_ENDIO_INCOMPLETE)
919 /* The target will handle the I/O */
920 return;
921 else if (r == DM_ENDIO_REQUEUE)
922 /* The target wants to requeue the I/O */
923 dm_requeue_unmapped_request(clone);
924 else {
925 DMWARN("unimplemented target endio return value: %d", r);
926 BUG();
927 }
928}
929
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100930/*
931 * Request completion handler for request-based dm
932 */
933static void dm_softirq_done(struct request *rq)
934{
Kiyoshi Ueda11a68242009-12-10 23:52:17 +0000935 bool mapped = true;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100936 struct request *clone = rq->completion_data;
937 struct dm_rq_target_io *tio = clone->end_io_data;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100938
Kiyoshi Ueda11a68242009-12-10 23:52:17 +0000939 if (rq->cmd_flags & REQ_FAILED)
940 mapped = false;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100941
Kiyoshi Ueda11a68242009-12-10 23:52:17 +0000942 dm_done(clone, tio->error, mapped);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100943}
944
945/*
946 * Complete the clone and the original request with the error status
947 * through softirq context.
948 */
949static void dm_complete_request(struct request *clone, int error)
950{
951 struct dm_rq_target_io *tio = clone->end_io_data;
952 struct request *rq = tio->orig;
953
Christoph Hellwig33659eb2010-08-07 18:17:56 +0200954 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000955 /*
956 * Barrier clones share an original request. So can't use
957 * softirq_done with the original.
958 * Pass the clone to dm_done() directly in this special case.
959 * It is safe (even if clone->q->queue_lock is held here)
960 * because there is no I/O dispatching during the completion
961 * of barrier clone.
962 */
963 dm_done(clone, error, true);
964 return;
965 }
966
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100967 tio->error = error;
968 rq->completion_data = clone;
969 blk_complete_request(rq);
970}
971
972/*
973 * Complete the not-mapped clone and the original request with the error status
974 * through softirq context.
975 * Target's rq_end_io() function isn't called.
976 * This may be used when the target's map_rq() function fails.
977 */
978void dm_kill_unmapped_request(struct request *clone, int error)
979{
980 struct dm_rq_target_io *tio = clone->end_io_data;
981 struct request *rq = tio->orig;
982
Christoph Hellwig33659eb2010-08-07 18:17:56 +0200983 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +0000984 /*
985 * Barrier clones share an original request.
986 * Leave it to dm_end_request(), which handles this special
987 * case.
988 */
989 BUG_ON(error > 0);
990 dm_end_request(clone, error);
991 return;
992 }
993
Kiyoshi Uedacec47e32009-06-22 10:12:35 +0100994 rq->cmd_flags |= REQ_FAILED;
995 dm_complete_request(clone, error);
996}
997EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
998
999/*
1000 * Called with the queue lock held
1001 */
1002static void end_clone_request(struct request *clone, int error)
1003{
1004 /*
1005 * For just cleaning up the information of the queue in which
1006 * the clone was dispatched.
1007 * The clone is *NOT* freed actually here because it is alloced from
1008 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1009 */
1010 __blk_put_request(clone->q, clone);
1011
1012 /*
1013 * Actual request completion is done in a softirq context which doesn't
1014 * hold the queue lock. Otherwise, deadlock could occur because:
1015 * - another request may be submitted by the upper level driver
1016 * of the stacking during the completion
1017 * - the submission which requires queue lock may be done
1018 * against this queue
1019 */
1020 dm_complete_request(clone, error);
1021}
1022
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023static sector_t max_io_len(struct mapped_device *md,
1024 sector_t sector, struct dm_target *ti)
1025{
1026 sector_t offset = sector - ti->begin;
1027 sector_t len = ti->len - offset;
1028
1029 /*
1030 * Does the target need to split even further ?
1031 */
1032 if (ti->split_io) {
1033 sector_t boundary;
1034 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
1035 - offset;
1036 if (len > boundary)
1037 len = boundary;
1038 }
1039
1040 return len;
1041}
1042
1043static void __map_bio(struct dm_target *ti, struct bio *clone,
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001044 struct dm_target_io *tio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045{
1046 int r;
Jens Axboe2056a782006-03-23 20:00:26 +01001047 sector_t sector;
Stefan Bader9faf4002006-10-03 01:15:41 -07001048 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 clone->bi_end_io = clone_endio;
1051 clone->bi_private = tio;
1052
1053 /*
1054 * Map the clone. If r == 0 we don't need to do
1055 * anything, the target has assumed ownership of
1056 * this io.
1057 */
1058 atomic_inc(&tio->io->io_count);
Jens Axboe2056a782006-03-23 20:00:26 +01001059 sector = clone->bi_sector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 r = ti->type->map(ti, clone, &tio->info);
Kiyoshi Ueda45cbcd72006-12-08 02:41:05 -08001061 if (r == DM_MAPIO_REMAPPED) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 /* the bio has been remapped so dispatch it */
Jens Axboe2056a782006-03-23 20:00:26 +01001063
Arnaldo Carvalho de Melo5f3ea372008-10-30 08:34:33 +01001064 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
Alan D. Brunelle22a7c312009-05-04 16:35:08 -04001065 tio->io->bio->bi_bdev->bd_dev, sector);
Jens Axboe2056a782006-03-23 20:00:26 +01001066
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 generic_make_request(clone);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08001068 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1069 /* error the io and bail out, or requeue it if needed */
Stefan Bader9faf4002006-10-03 01:15:41 -07001070 md = tio->io->md;
1071 dec_pending(tio->io, r);
1072 /*
1073 * Store bio_set for cleanup.
1074 */
1075 clone->bi_private = md->bs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 bio_put(clone);
Stefan Bader9faf4002006-10-03 01:15:41 -07001077 free_tio(md, tio);
Kiyoshi Ueda45cbcd72006-12-08 02:41:05 -08001078 } else if (r) {
1079 DMWARN("unimplemented target map return value: %d", r);
1080 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 }
1082}
1083
1084struct clone_info {
1085 struct mapped_device *md;
1086 struct dm_table *map;
1087 struct bio *bio;
1088 struct dm_io *io;
1089 sector_t sector;
1090 sector_t sector_count;
1091 unsigned short idx;
1092};
1093
Peter Osterlund36763472005-09-06 15:16:42 -07001094static void dm_bio_destructor(struct bio *bio)
1095{
Stefan Bader9faf4002006-10-03 01:15:41 -07001096 struct bio_set *bs = bio->bi_private;
1097
1098 bio_free(bio, bs);
Peter Osterlund36763472005-09-06 15:16:42 -07001099}
1100
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101/*
1102 * Creates a little bio that is just does part of a bvec.
1103 */
1104static struct bio *split_bvec(struct bio *bio, sector_t sector,
1105 unsigned short idx, unsigned int offset,
Stefan Bader9faf4002006-10-03 01:15:41 -07001106 unsigned int len, struct bio_set *bs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107{
1108 struct bio *clone;
1109 struct bio_vec *bv = bio->bi_io_vec + idx;
1110
Stefan Bader9faf4002006-10-03 01:15:41 -07001111 clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
Peter Osterlund36763472005-09-06 15:16:42 -07001112 clone->bi_destructor = dm_bio_destructor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 *clone->bi_io_vec = *bv;
1114
1115 clone->bi_sector = sector;
1116 clone->bi_bdev = bio->bi_bdev;
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001117 clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 clone->bi_vcnt = 1;
1119 clone->bi_size = to_bytes(len);
1120 clone->bi_io_vec->bv_offset = offset;
1121 clone->bi_io_vec->bv_len = clone->bi_size;
Martin K. Petersenf3e1d262008-10-21 17:45:04 +01001122 clone->bi_flags |= 1 << BIO_CLONED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123
Martin K. Petersen9c470082009-04-09 00:27:12 +01001124 if (bio_integrity(bio)) {
Martin K. Petersen7878cba2009-06-26 15:37:49 +02001125 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001126 bio_integrity_trim(clone,
1127 bio_sector_offset(bio, idx, offset), len);
1128 }
1129
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 return clone;
1131}
1132
1133/*
1134 * Creates a bio that consists of range of complete bvecs.
1135 */
1136static struct bio *clone_bio(struct bio *bio, sector_t sector,
1137 unsigned short idx, unsigned short bv_count,
Stefan Bader9faf4002006-10-03 01:15:41 -07001138 unsigned int len, struct bio_set *bs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139{
1140 struct bio *clone;
1141
Stefan Bader9faf4002006-10-03 01:15:41 -07001142 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1143 __bio_clone(clone, bio);
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001144 clone->bi_rw &= ~REQ_HARDBARRIER;
Stefan Bader9faf4002006-10-03 01:15:41 -07001145 clone->bi_destructor = dm_bio_destructor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 clone->bi_sector = sector;
1147 clone->bi_idx = idx;
1148 clone->bi_vcnt = idx + bv_count;
1149 clone->bi_size = to_bytes(len);
1150 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1151
Martin K. Petersen9c470082009-04-09 00:27:12 +01001152 if (bio_integrity(bio)) {
Martin K. Petersen7878cba2009-06-26 15:37:49 +02001153 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001154
1155 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1156 bio_integrity_trim(clone,
1157 bio_sector_offset(bio, idx, 0), len);
1158 }
1159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 return clone;
1161}
1162
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001163static struct dm_target_io *alloc_tio(struct clone_info *ci,
1164 struct dm_target *ti)
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001165{
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001166 struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001167
1168 tio->io = ci->io;
1169 tio->ti = ti;
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001170 memset(&tio->info, 0, sizeof(tio->info));
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001171
1172 return tio;
1173}
1174
1175static void __flush_target(struct clone_info *ci, struct dm_target *ti,
1176 unsigned flush_nr)
1177{
1178 struct dm_target_io *tio = alloc_tio(ci, ti);
1179 struct bio *clone;
1180
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001181 tio->info.flush_request = flush_nr;
1182
1183 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1184 __bio_clone(clone, ci->bio);
1185 clone->bi_destructor = dm_bio_destructor;
1186
1187 __map_bio(ti, clone, tio);
1188}
1189
1190static int __clone_and_map_empty_barrier(struct clone_info *ci)
1191{
1192 unsigned target_nr = 0, flush_nr;
1193 struct dm_target *ti;
1194
1195 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1196 for (flush_nr = 0; flush_nr < ti->num_flush_requests;
1197 flush_nr++)
1198 __flush_target(ci, ti, flush_nr);
1199
1200 ci->sector_count = 0;
1201
1202 return 0;
1203}
1204
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001205static int __clone_and_map(struct clone_info *ci)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206{
1207 struct bio *clone, *bio = ci->bio;
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001208 struct dm_target *ti;
1209 sector_t len = 0, max;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001210 struct dm_target_io *tio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001212 if (unlikely(bio_empty_barrier(bio)))
1213 return __clone_and_map_empty_barrier(ci);
1214
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001215 ti = dm_table_find_target(ci->map, ci->sector);
1216 if (!dm_target_is_valid(ti))
1217 return -EIO;
1218
1219 max = max_io_len(ci->md, ci->sector, ti);
1220
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 /*
1222 * Allocate a target io object.
1223 */
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001224 tio = alloc_tio(ci, ti);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
1226 if (ci->sector_count <= max) {
1227 /*
1228 * Optimise for the simple case where we can do all of
1229 * the remaining io with a single clone.
1230 */
1231 clone = clone_bio(bio, ci->sector, ci->idx,
Stefan Bader9faf4002006-10-03 01:15:41 -07001232 bio->bi_vcnt - ci->idx, ci->sector_count,
1233 ci->md->bs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 __map_bio(ti, clone, tio);
1235 ci->sector_count = 0;
1236
1237 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1238 /*
1239 * There are some bvecs that don't span targets.
1240 * Do as many of these as possible.
1241 */
1242 int i;
1243 sector_t remaining = max;
1244 sector_t bv_len;
1245
1246 for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
1247 bv_len = to_sector(bio->bi_io_vec[i].bv_len);
1248
1249 if (bv_len > remaining)
1250 break;
1251
1252 remaining -= bv_len;
1253 len += bv_len;
1254 }
1255
Stefan Bader9faf4002006-10-03 01:15:41 -07001256 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
1257 ci->md->bs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 __map_bio(ti, clone, tio);
1259
1260 ci->sector += len;
1261 ci->sector_count -= len;
1262 ci->idx = i;
1263
1264 } else {
1265 /*
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001266 * Handle a bvec that must be split between two or more targets.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 */
1268 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001269 sector_t remaining = to_sector(bv->bv_len);
1270 unsigned int offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001272 do {
1273 if (offset) {
1274 ti = dm_table_find_target(ci->map, ci->sector);
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001275 if (!dm_target_is_valid(ti))
1276 return -EIO;
1277
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001278 max = max_io_len(ci->md, ci->sector, ti);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001280 tio = alloc_tio(ci, ti);
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001281 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001283 len = min(remaining, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001285 clone = split_bvec(bio, ci->sector, ci->idx,
Stefan Bader9faf4002006-10-03 01:15:41 -07001286 bv->bv_offset + offset, len,
1287 ci->md->bs);
Alasdair G Kergond2044a92006-03-22 00:07:42 -08001288
1289 __map_bio(ti, clone, tio);
1290
1291 ci->sector += len;
1292 ci->sector_count -= len;
1293 offset += to_bytes(len);
1294 } while (remaining -= len);
1295
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 ci->idx++;
1297 }
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001298
1299 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300}
1301
1302/*
Mikulas Patocka8a53c282009-04-02 19:55:37 +01001303 * Split the bio into several clones and submit it to targets.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 */
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001305static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306{
1307 struct clone_info ci;
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001308 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001310 ci.map = dm_get_live_table(md);
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001311 if (unlikely(!ci.map)) {
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001312 if (!(bio->bi_rw & REQ_HARDBARRIER))
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01001313 bio_io_error(bio);
1314 else
Mikulas Patocka5aa27812009-06-22 10:12:18 +01001315 if (!md->barrier_error)
1316 md->barrier_error = -EIO;
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001317 return;
1318 }
Mikulas Patocka692d0eb2009-04-09 00:27:13 +01001319
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 ci.md = md;
1321 ci.bio = bio;
1322 ci.io = alloc_io(md);
1323 ci.io->error = 0;
1324 atomic_set(&ci.io->io_count, 1);
1325 ci.io->bio = bio;
1326 ci.io->md = md;
Kiyoshi Uedaf88fb982009-10-16 23:18:15 +01001327 spin_lock_init(&ci.io->endio_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 ci.sector = bio->bi_sector;
1329 ci.sector_count = bio_sectors(bio);
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001330 if (unlikely(bio_empty_barrier(bio)))
1331 ci.sector_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 ci.idx = bio->bi_idx;
1333
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -08001334 start_io_acct(ci.io);
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001335 while (ci.sector_count && !error)
1336 error = __clone_and_map(&ci);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 /* drop the extra reference count */
Jun'ichi Nomura512875b2007-12-13 14:15:25 +00001339 dec_pending(ci.io, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 dm_table_put(ci.map);
1341}
1342/*-----------------------------------------------------------------
1343 * CRUD END
1344 *---------------------------------------------------------------*/
1345
Milan Brozf6fccb12008-07-21 12:00:37 +01001346static int dm_merge_bvec(struct request_queue *q,
1347 struct bvec_merge_data *bvm,
1348 struct bio_vec *biovec)
1349{
1350 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001351 struct dm_table *map = dm_get_live_table(md);
Milan Brozf6fccb12008-07-21 12:00:37 +01001352 struct dm_target *ti;
1353 sector_t max_sectors;
Mikulas Patocka50371082008-10-01 14:39:17 +01001354 int max_size = 0;
Milan Brozf6fccb12008-07-21 12:00:37 +01001355
1356 if (unlikely(!map))
Mikulas Patocka50371082008-10-01 14:39:17 +01001357 goto out;
Milan Brozf6fccb12008-07-21 12:00:37 +01001358
1359 ti = dm_table_find_target(map, bvm->bi_sector);
Mikulas Patockab01cd5a2008-10-01 14:39:24 +01001360 if (!dm_target_is_valid(ti))
1361 goto out_table;
Milan Brozf6fccb12008-07-21 12:00:37 +01001362
1363 /*
1364 * Find maximum amount of I/O that won't need splitting
1365 */
1366 max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
1367 (sector_t) BIO_MAX_SECTORS);
1368 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1369 if (max_size < 0)
1370 max_size = 0;
1371
1372 /*
1373 * merge_bvec_fn() returns number of bytes
1374 * it can accept at this offset
1375 * max is precomputed maximal io size
1376 */
1377 if (max_size && ti->type->merge)
1378 max_size = ti->type->merge(ti, bvm, biovec, max_size);
Mikulas Patocka8cbeb672009-06-22 10:12:14 +01001379 /*
1380 * If the target doesn't support merge method and some of the devices
1381 * provided their merge_bvec method (we know this by looking at
1382 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1383 * entries. So always set max_size to 0, and the code below allows
1384 * just one page.
1385 */
1386 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1387
1388 max_size = 0;
Milan Brozf6fccb12008-07-21 12:00:37 +01001389
Mikulas Patockab01cd5a2008-10-01 14:39:24 +01001390out_table:
Mikulas Patocka50371082008-10-01 14:39:17 +01001391 dm_table_put(map);
1392
1393out:
Milan Brozf6fccb12008-07-21 12:00:37 +01001394 /*
1395 * Always allow an entire first page
1396 */
1397 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1398 max_size = biovec->bv_len;
1399
Milan Brozf6fccb12008-07-21 12:00:37 +01001400 return max_size;
1401}
1402
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403/*
1404 * The request function that just remaps the bio built up by
1405 * dm_merge_bvec.
1406 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001407static int _dm_request(struct request_queue *q, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408{
Kevin Corry12f03a42006-02-01 03:04:52 -08001409 int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 struct mapped_device *md = q->queuedata;
Tejun Heoc9959052008-08-25 19:47:21 +09001411 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001413 down_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
Tejun Heo074a7ac2008-08-25 19:56:14 +09001415 cpu = part_stat_lock();
1416 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1417 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1418 part_stat_unlock();
Kevin Corry12f03a42006-02-01 03:04:52 -08001419
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 /*
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001421 * If we're suspended or the thread is processing barriers
1422 * we have to queue this io for later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 */
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01001424 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001425 unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001426 up_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
Alasdair G Kergon54d9a1b2009-04-09 00:27:14 +01001428 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
1429 bio_rw(bio) == READA) {
1430 bio_io_error(bio);
1431 return 0;
1432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
Mikulas Patocka92c63902009-04-09 00:27:15 +01001434 queue_io(md, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
Mikulas Patocka92c63902009-04-09 00:27:15 +01001436 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
1438
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001439 __split_and_process_bio(md, bio);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001440 up_read(&md->io_lock);
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001441 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442}
1443
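/*
 * Request-based dm: pass the bio to the block layer's original
 * make_request function so it is built into a struct request.
 */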
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001444static int dm_make_request(struct request_queue *q, struct bio *bio)
1445{
1446 struct mapped_device *md = q->queuedata;
1447
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001448 return md->saved_make_request_fn(q, bio); /* call __make_request() */
1449}
1450
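/*
 * A mapped device is request-based once its queue has been marked
 * stackable; the flag stays cleared until the device type is decided
 * at the first table load (see alloc_dev()).
 */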
1451static int dm_request_based(struct mapped_device *md)
1452{
1453 return blk_queue_stackable(md->queue);
1454}
1455
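/*
 * Top-level make_request function: route the bio down the request-based
 * or the bio-based path depending on the device type.
 */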
1456static int dm_request(struct request_queue *q, struct bio *bio)
1457{
1458 struct mapped_device *md = q->queuedata;
1459
1460 if (dm_request_based(md))
1461 return dm_make_request(q, bio);
1462
1463 return _dm_request(q, bio);
1464}
1465
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001466static bool dm_rq_is_flush_request(struct request *rq)
1467{
FUJITA Tomonori144d6ed2010-07-03 17:45:37 +09001468 if (rq->cmd_flags & REQ_FLUSH)
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001469 return true;
1470 else
1471 return false;
1472}
1473
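/*
 * Insert a clone into the queue of the underlying device, enabling I/O
 * accounting for it and completing it immediately if insertion fails.
 */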
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001474void dm_dispatch_request(struct request *rq)
1475{
1476 int r;
1477
1478 if (blk_queue_io_stat(rq->q))
1479 rq->cmd_flags |= REQ_IO_STAT;
1480
1481 rq->start_time = jiffies;
1482 r = blk_insert_cloned_request(rq->q, rq);
1483 if (r)
1484 dm_complete_request(rq, r);
1485}
1486EXPORT_SYMBOL_GPL(dm_dispatch_request);
1487
1488static void dm_rq_bio_destructor(struct bio *bio)
1489{
1490 struct dm_rq_clone_bio_info *info = bio->bi_private;
1491 struct mapped_device *md = info->tio->md;
1492
1493 free_bio_info(info);
1494 bio_free(bio, md->bs);
1495}
1496
1497static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1498 void *data)
1499{
1500 struct dm_rq_target_io *tio = data;
1501 struct mapped_device *md = tio->md;
1502 struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1503
1504 if (!info)
1505 return -ENOMEM;
1506
1507 info->orig = bio_orig;
1508 info->tio = tio;
1509 bio->bi_end_io = end_clone_bio;
1510 bio->bi_private = info;
1511 bio->bi_destructor = dm_rq_bio_destructor;
1512
1513 return 0;
1514}
1515
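/*
 * Set up @clone as a copy of @rq: a flush request becomes an empty
 * barrier clone, anything else gets its bios cloned from the original.
 */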
1516static int setup_clone(struct request *clone, struct request *rq,
1517 struct dm_rq_target_io *tio)
1518{
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001519 int r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001520
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001521 if (dm_rq_is_flush_request(rq)) {
1522 blk_rq_init(NULL, clone);
1523 clone->cmd_type = REQ_TYPE_FS;
1524 clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
1525 } else {
1526 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1527 dm_rq_bio_constructor, tio);
1528 if (r)
1529 return r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001530
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001531 clone->cmd = rq->cmd;
1532 clone->cmd_len = rq->cmd_len;
1533 clone->sense = rq->sense;
1534 clone->buffer = rq->buffer;
1535 }
1536
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001537 clone->end_io = end_clone_request;
1538 clone->end_io_data = tio;
1539
1540 return 0;
1541}
1542
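/*
 * Allocate a dm_rq_target_io for @rq and initialise the embedded clone.
 * Returns the clone, or NULL if allocation or clone setup fails.
 */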
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001543static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1544 gfp_t gfp_mask)
1545{
1546 struct request *clone;
1547 struct dm_rq_target_io *tio;
1548
1549 tio = alloc_rq_tio(md, gfp_mask);
1550 if (!tio)
1551 return NULL;
1552
1553 tio->md = md;
1554 tio->ti = NULL;
1555 tio->orig = rq;
1556 tio->error = 0;
1557 memset(&tio->info, 0, sizeof(tio->info));
1558
1559 clone = &tio->clone;
1560 if (setup_clone(clone, rq, tio)) {
1561 /* -ENOMEM */
1562 free_rq_tio(tio);
1563 return NULL;
1564 }
1565
1566 return clone;
1567}
1568
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001569/*
1570 * Called with the queue lock held.
1571 */
1572static int dm_prep_fn(struct request_queue *q, struct request *rq)
1573{
1574 struct mapped_device *md = q->queuedata;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001575 struct request *clone;
1576
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001577 if (unlikely(dm_rq_is_flush_request(rq)))
1578 return BLKPREP_OK;
1579
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001580 if (unlikely(rq->special)) {
1581 DMWARN("Already has something in rq->special.");
1582 return BLKPREP_KILL;
1583 }
1584
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001585 clone = clone_rq(rq, md, GFP_ATOMIC);
1586 if (!clone)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001587 return BLKPREP_DEFER;
1588
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001589 rq->special = clone;
1590 rq->cmd_flags |= REQ_DONTPREP;
1591
1592 return BLKPREP_OK;
1593}
1594
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001595/*
1596 * Returns:
1597 * 0 : the request has been processed (not requeued)
1598 * !0 : the request has been requeued
1599 */
1600static int map_request(struct dm_target *ti, struct request *clone,
1601 struct mapped_device *md)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001602{
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001603 int r, requeued = 0;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001604 struct dm_rq_target_io *tio = clone->end_io_data;
1605
1606 /*
1607 * Hold the md reference here for the in-flight I/O.
1608	 * We can't rely on the reference count taken by the device opener,
1609	 * because the device may be closed during request completion,
1610	 * once all of its bios have completed.
1611 * See the comment in rq_completed() too.
1612 */
1613 dm_get(md);
1614
1615 tio->ti = ti;
1616 r = ti->type->map_rq(ti, clone, &tio->info);
1617 switch (r) {
1618 case DM_MAPIO_SUBMITTED:
1619 /* The target has taken the I/O to submit by itself later */
1620 break;
1621 case DM_MAPIO_REMAPPED:
1622 /* The target has remapped the I/O so dispatch it */
Jun'ichi Nomura6db4ccd2009-12-10 23:52:25 +00001623 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1624 blk_rq_pos(tio->orig));
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001625 dm_dispatch_request(clone);
1626 break;
1627 case DM_MAPIO_REQUEUE:
1628 /* The target wants to requeue the I/O */
1629 dm_requeue_unmapped_request(clone);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001630 requeued = 1;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001631 break;
1632 default:
1633 if (r > 0) {
1634 DMWARN("unimplemented target map return value: %d", r);
1635 BUG();
1636 }
1637
1638 /* The target wants to complete the I/O */
1639 dm_kill_unmapped_request(clone, r);
1640 break;
1641 }
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001642
1643 return requeued;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001644}
1645
1646/*
1647 * q->request_fn for request-based dm.
1648 * Called with the queue lock held.
1649 */
1650static void dm_request_fn(struct request_queue *q)
1651{
1652 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001653 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001654 struct dm_target *ti;
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001655 struct request *rq, *clone;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001656
1657 /*
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001658 * For suspend, check blk_queue_stopped() and increment
1659	 * ->pending within a single queue_lock so that we do not increment the
1660	 * number of in-flight I/Os after the queue has been stopped in
1661 * dm_suspend().
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001662 */
1663 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1664 rq = blk_peek_request(q);
1665 if (!rq)
1666 goto plug_and_out;
1667
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001668 if (unlikely(dm_rq_is_flush_request(rq))) {
1669 BUG_ON(md->flush_request);
1670 md->flush_request = rq;
1671 blk_start_request(rq);
1672 queue_work(md->wq, &md->barrier_work);
1673 goto out;
1674 }
1675
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001676 ti = dm_table_find_target(map, blk_rq_pos(rq));
1677 if (ti->type->busy && ti->type->busy(ti))
1678 goto plug_and_out;
1679
1680 blk_start_request(rq);
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001681 clone = rq->special;
1682 atomic_inc(&md->pending[rq_data_dir(clone)]);
1683
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001684 spin_unlock(q->queue_lock);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001685 if (map_request(ti, clone, md))
1686 goto requeued;
1687
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001688 spin_lock_irq(q->queue_lock);
1689 }
1690
1691 goto out;
1692
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001693requeued:
1694 spin_lock_irq(q->queue_lock);
1695
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001696plug_and_out:
1697 if (!elv_queue_empty(q))
1698 /* Some requests still remain, retry later */
1699 blk_plug_device(q);
1700
1701out:
1702 dm_table_put(map);
1703
1704 return;
1705}
1706
1707int dm_underlying_device_busy(struct request_queue *q)
1708{
1709 return blk_lld_busy(q);
1710}
1711EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1712
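/*
 * Report the device as busy while I/O is blocked for suspend or while
 * any underlying target reports itself busy.
 */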
1713static int dm_lld_busy(struct request_queue *q)
1714{
1715 int r;
1716 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001717 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001718
1719 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1720 r = 1;
1721 else
1722 r = dm_table_any_busy_target(map);
1723
1724 dm_table_put(map);
1725
1726 return r;
1727}
1728
Jens Axboe165125e2007-07-24 09:28:11 +02001729static void dm_unplug_all(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730{
1731 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001732 struct dm_table *map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
1734 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001735 if (dm_request_based(md))
1736 generic_unplug_device(q);
1737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 dm_table_unplug_all(map);
1739 dm_table_put(map);
1740 }
1741}
1742
1743static int dm_any_congested(void *congested_data, int bdi_bits)
1744{
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001745 int r = bdi_bits;
1746 struct mapped_device *md = congested_data;
1747 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001749 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001750 map = dm_get_live_table(md);
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001751 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001752 /*
1753	 * Request-based dm cares only about its own queue when
1754	 * queried about the congestion status of the request_queue.
1755 */
1756 if (dm_request_based(md))
1757 r = md->queue->backing_dev_info.state &
1758 bdi_bits;
1759 else
1760 r = dm_table_any_congested(map, bdi_bits);
1761
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001762 dm_table_put(map);
1763 }
1764 }
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return r;
1767}
1768
1769/*-----------------------------------------------------------------
1770 * An IDR is used to keep track of allocated minor numbers.
1771 *---------------------------------------------------------------*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772static DEFINE_IDR(_minor_idr);
1773
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001774static void free_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775{
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001776 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 idr_remove(&_minor_idr, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001778 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779}
1780
1781/*
1782 * See if the device with a specific minor # is free.
1783 */
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001784static int specific_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785{
1786 int r, m;
1787
1788 if (minor >= (1 << MINORBITS))
1789 return -EINVAL;
1790
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001791 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1792 if (!r)
1793 return -ENOMEM;
1794
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001795 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797 if (idr_find(&_minor_idr, minor)) {
1798 r = -EBUSY;
1799 goto out;
1800 }
1801
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001802 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001803 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
1806 if (m != minor) {
1807 idr_remove(&_minor_idr, m);
1808 r = -EBUSY;
1809 goto out;
1810 }
1811
1812out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001813 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 return r;
1815}
1816
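/*
 * Allocate the next available minor number.
 */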
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001817static int next_free_minor(int *minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818{
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001819 int r, m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001822 if (!r)
1823 return -ENOMEM;
1824
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001825 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001827 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001828 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 if (m >= (1 << MINORBITS)) {
1832 idr_remove(&_minor_idr, m);
1833 r = -ENOSPC;
1834 goto out;
1835 }
1836
1837 *minor = m;
1838
1839out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001840 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 return r;
1842}
1843
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001844static const struct block_device_operations dm_blk_dops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
Mikulas Patocka53d59142009-04-02 19:55:37 +01001846static void dm_wq_work(struct work_struct *work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001847static void dm_rq_barrier_work(struct work_struct *work);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849/*
1850 * Allocate and initialise a blank device with a given minor.
1851 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001852static struct mapped_device *alloc_dev(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853{
1854 int r;
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001855 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001856 void *old_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
1858 if (!md) {
1859 DMWARN("unable to allocate device, out of memory.");
1860 return NULL;
1861 }
1862
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001863 if (!try_module_get(THIS_MODULE))
Milan Broz6ed7ade2008-02-08 02:10:19 +00001864 goto bad_module_get;
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 /* get a minor number for the dev */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001867 if (minor == DM_ANY_MINOR)
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001868 r = next_free_minor(&minor);
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001869 else
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001870 r = specific_minor(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 if (r < 0)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001872 goto bad_minor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001874 init_rwsem(&md->io_lock);
Daniel Walkere61290a2008-02-08 02:10:08 +00001875 mutex_init(&md->suspend_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001876 spin_lock_init(&md->deferred_lock);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001877 spin_lock_init(&md->barrier_error_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 rwlock_init(&md->map_lock);
1879 atomic_set(&md->holders, 1);
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -07001880 atomic_set(&md->open_count, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 atomic_set(&md->event_nr, 0);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001882 atomic_set(&md->uevent_seq, 0);
1883 INIT_LIST_HEAD(&md->uevent_list);
1884 spin_lock_init(&md->uevent_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001886 md->queue = blk_init_queue(dm_request_fn, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 if (!md->queue)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001888 goto bad_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001890 /*
1891 * Request-based dm devices cannot be stacked on top of bio-based dm
1892 * devices. The type of this dm device has not been decided yet,
1893 * although we initialized the queue using blk_init_queue().
1894	 * The type is decided when the first table is loaded.
1895 * To prevent problematic device stacking, clear the queue flag
1896 * for request stacking support until then.
1897 *
1898 * This queue is new, so no concurrency on the queue_flags.
1899 */
1900 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1901 md->saved_make_request_fn = md->queue->make_request_fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 md->queue->queuedata = md;
1903 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1904 md->queue->backing_dev_info.congested_data = md;
1905 blk_queue_make_request(md->queue, dm_request);
Jens Axboedaef2652006-01-10 10:48:02 +01001906 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 md->queue->unplug_fn = dm_unplug_all;
Milan Brozf6fccb12008-07-21 12:00:37 +01001908 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001909 blk_queue_softirq_done(md->queue, dm_softirq_done);
1910 blk_queue_prep_rq(md->queue, dm_prep_fn);
1911 blk_queue_lld_busy(md->queue, dm_lld_busy);
FUJITA Tomonori00fff262010-07-03 17:45:40 +09001912 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
Stefan Bader9faf4002006-10-03 01:15:41 -07001913
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 md->disk = alloc_disk(1);
1915 if (!md->disk)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001916 goto bad_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02001918 atomic_set(&md->pending[0], 0);
1919 atomic_set(&md->pending[1], 0);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001920 init_waitqueue_head(&md->wait);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001921 INIT_WORK(&md->work, dm_wq_work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001922 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001923 init_waitqueue_head(&md->eventq);
1924
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 md->disk->major = _major;
1926 md->disk->first_minor = minor;
1927 md->disk->fops = &dm_blk_dops;
1928 md->disk->queue = md->queue;
1929 md->disk->private_data = md;
1930 sprintf(md->disk->disk_name, "dm-%d", minor);
1931 add_disk(md->disk);
Mike Anderson7e51f252006-03-27 01:17:52 -08001932 format_dev_t(md->name, MKDEV(_major, minor));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
Milan Broz304f3f62008-02-08 02:11:17 +00001934 md->wq = create_singlethread_workqueue("kdmflush");
1935 if (!md->wq)
1936 goto bad_thread;
1937
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001938 md->bdev = bdget_disk(md->disk, 0);
1939 if (!md->bdev)
1940 goto bad_bdev;
1941
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001942 /* Populate the mapping, nobody knows we exist yet */
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001943 spin_lock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001944 old_md = idr_replace(&_minor_idr, md, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001945 spin_unlock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001946
1947 BUG_ON(old_md != MINOR_ALLOCED);
1948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 return md;
1950
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001951bad_bdev:
1952 destroy_workqueue(md->wq);
Milan Broz304f3f62008-02-08 02:11:17 +00001953bad_thread:
Zdenek Kabelac03022c52009-10-16 23:18:15 +01001954 del_gendisk(md->disk);
Milan Broz304f3f62008-02-08 02:11:17 +00001955 put_disk(md->disk);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001956bad_disk:
Al Viro1312f402006-03-12 11:02:03 -05001957 blk_cleanup_queue(md->queue);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001958bad_queue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 free_minor(minor);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001960bad_minor:
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001961 module_put(THIS_MODULE);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001962bad_module_get:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 kfree(md);
1964 return NULL;
1965}
1966
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001967static void unlock_fs(struct mapped_device *md);
1968
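/*
 * Tear down everything set up by alloc_dev(): mempools, workqueue,
 * gendisk, request_queue, the minor number and finally the md itself.
 */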
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969static void free_dev(struct mapped_device *md)
1970{
Tejun Heof331c022008-09-03 09:01:48 +02001971 int minor = MINOR(disk_devt(md->disk));
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001972
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001973 unlock_fs(md);
1974 bdput(md->bdev);
Milan Broz304f3f62008-02-08 02:11:17 +00001975 destroy_workqueue(md->wq);
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001976 if (md->tio_pool)
1977 mempool_destroy(md->tio_pool);
1978 if (md->io_pool)
1979 mempool_destroy(md->io_pool);
1980 if (md->bs)
1981 bioset_free(md->bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001982 blk_integrity_unregister(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 del_gendisk(md->disk);
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001984 free_minor(minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001985
1986 spin_lock(&_minor_lock);
1987 md->disk->private_data = NULL;
1988 spin_unlock(&_minor_lock);
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 put_disk(md->disk);
Al Viro1312f402006-03-12 11:02:03 -05001991 blk_cleanup_queue(md->queue);
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001992 module_put(THIS_MODULE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 kfree(md);
1994}
1995
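/*
 * Take ownership of the mempools carried by the table, unless the md
 * already has the mempools it needs.
 */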
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001996static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1997{
1998 struct dm_md_mempools *p;
1999
2000 if (md->io_pool && md->tio_pool && md->bs)
2001 /* the md already has necessary mempools */
2002 goto out;
2003
2004 p = dm_table_get_md_mempools(t);
2005 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2006
2007 md->io_pool = p->io_pool;
2008 p->io_pool = NULL;
2009 md->tio_pool = p->tio_pool;
2010 p->tio_pool = NULL;
2011 md->bs = p->bs;
2012 p->bs = NULL;
2013
2014out:
2015 /* mempool bind completed, now no need any mempools in the table */
2016 dm_table_free_md_mempools(t);
2017}
2018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019/*
2020 * Bind a table to the device.
2021 */
2022static void event_callback(void *context)
2023{
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002024 unsigned long flags;
2025 LIST_HEAD(uevents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 struct mapped_device *md = (struct mapped_device *) context;
2027
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002028 spin_lock_irqsave(&md->uevent_lock, flags);
2029 list_splice_init(&md->uevent_list, &uevents);
2030 spin_unlock_irqrestore(&md->uevent_lock, flags);
2031
Tejun Heoed9e1982008-08-25 19:56:05 +09002032 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 atomic_inc(&md->event_nr);
2035 wake_up(&md->eventq);
2036}
2037
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07002038static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07002040 set_capacity(md->disk, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002042 mutex_lock(&md->bdev->bd_inode->i_mutex);
2043 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2044 mutex_unlock(&md->bdev->bd_inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002047/*
2048 * Returns old map, which caller must destroy.
2049 */
2050static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2051 struct queue_limits *limits)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002053 struct dm_table *old_map;
Jens Axboe165125e2007-07-24 09:28:11 +02002054 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 sector_t size;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002056 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002059
2060 /*
2061 * Wipe any geometry if the size of the table changed.
2062 */
2063 if (size != get_capacity(md->disk))
2064 memset(&md->geometry, 0, sizeof(md->geometry));
2065
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002066 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002068 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002069
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002070 /*
2071	 * The queue hasn't been stopped yet if the old table type wasn't
2072	 * request-based during suspension.  So stop it to prevent
2073	 * I/O mapping before resume.
2074	 * This must be done before setting the queue restrictions,
2075	 * because request-based dm may start running right after they are set.
2076 */
2077 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2078 stop_queue(q);
2079
2080 __bind_mempools(md, t);
2081
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002082 write_lock_irqsave(&md->map_lock, flags);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002083 old_map = md->map;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002084 md->map = t;
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002085 dm_table_set_restrictions(t, q, limits);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002086 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002087
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002088 return old_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089}
2090
Alasdair G Kergona7940152009-12-10 23:52:23 +00002091/*
2092 * Returns unbound table for the caller to free.
2093 */
2094static struct dm_table *__unbind(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095{
2096 struct dm_table *map = md->map;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002097 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 if (!map)
Alasdair G Kergona7940152009-12-10 23:52:23 +00002100 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102 dm_table_event_callback(map, NULL, NULL);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002103 write_lock_irqsave(&md->map_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 md->map = NULL;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002105 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergona7940152009-12-10 23:52:23 +00002106
2107 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
2109
2110/*
2111 * Constructor for a new device.
2112 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002113int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114{
2115 struct mapped_device *md;
2116
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002117 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 if (!md)
2119 return -ENXIO;
2120
Milan Broz784aae72009-01-06 03:05:12 +00002121 dm_sysfs_init(md);
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 *result = md;
2124 return 0;
2125}
2126
David Teigland637842c2006-01-06 00:20:00 -08002127static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
2129 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 unsigned minor = MINOR(dev);
2131
2132 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2133 return NULL;
2134
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002135 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
2137 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002138 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02002139 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Kiyoshi Uedaabdc5682010-08-12 04:13:54 +01002140 dm_deleting_md(md) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07002141 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08002142 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002143 goto out;
2144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002146out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002147 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
David Teigland637842c2006-01-06 00:20:00 -08002149 return md;
2150}
2151
David Teiglandd229a952006-01-06 00:20:01 -08002152struct mapped_device *dm_get_md(dev_t dev)
2153{
2154 struct mapped_device *md = dm_find_md(dev);
2155
2156 if (md)
2157 dm_get(md);
2158
2159 return md;
2160}
2161
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002162void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08002163{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002164 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165}
2166
2167void dm_set_mdptr(struct mapped_device *md, void *ptr)
2168{
2169 md->interface_ptr = ptr;
2170}
2171
2172void dm_get(struct mapped_device *md)
2173{
2174 atomic_inc(&md->holders);
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002175 BUG_ON(test_bit(DMF_FREEING, &md->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176}
2177
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002178const char *dm_device_name(struct mapped_device *md)
2179{
2180 return md->name;
2181}
2182EXPORT_SYMBOL_GPL(dm_device_name);
2183
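/*
 * Common teardown for dm_destroy() and dm_destroy_immediate(): suspend
 * the targets if necessary, then free the device, optionally waiting
 * for the last holder to drop its reference.
 */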
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002184static void __dm_destroy(struct mapped_device *md, bool wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
Mike Anderson1134e5a2006-03-27 01:17:54 -08002186 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002188 might_sleep();
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002189
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002190 spin_lock(&_minor_lock);
2191 map = dm_get_live_table(md);
2192 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2193 set_bit(DMF_FREEING, &md->flags);
2194 spin_unlock(&_minor_lock);
2195
2196 if (!dm_suspended_md(md)) {
2197 dm_table_presuspend_targets(map);
2198 dm_table_postsuspend_targets(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 }
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002200
2201 /*
2202	 * Rare, but there may still be I/O requests that are going to complete,
2203	 * for example.  Wait for all references to disappear.
2204	 * No one should increment the reference count of the mapped_device
2205	 * after its state becomes DMF_FREEING.
2206 */
2207 if (wait)
2208 while (atomic_read(&md->holders))
2209 msleep(1);
2210 else if (atomic_read(&md->holders))
2211 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2212 dm_device_name(md), atomic_read(&md->holders));
2213
2214 dm_sysfs_exit(md);
2215 dm_table_put(map);
2216 dm_table_destroy(__unbind(md));
2217 free_dev(md);
2218}
2219
2220void dm_destroy(struct mapped_device *md)
2221{
2222 __dm_destroy(md, true);
2223}
2224
2225void dm_destroy_immediate(struct mapped_device *md)
2226{
2227 __dm_destroy(md, false);
2228}
2229
2230void dm_put(struct mapped_device *md)
2231{
2232 atomic_dec(&md->holders);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233}
Edward Goggin79eb8852007-05-09 02:32:56 -07002234EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
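/*
 * Wait until no I/O is in flight.  With TASK_INTERRUPTIBLE a pending
 * signal aborts the wait and -EINTR is returned.
 */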
Mikulas Patocka401600d2009-04-02 19:55:38 +01002236static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
Milan Broz46125c12008-02-08 02:10:30 +00002237{
2238 int r = 0;
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002239 DECLARE_WAITQUEUE(wait, current);
2240
2241 dm_unplug_all(md->queue);
2242
2243 add_wait_queue(&md->wait, &wait);
Milan Broz46125c12008-02-08 02:10:30 +00002244
2245 while (1) {
Mikulas Patocka401600d2009-04-02 19:55:38 +01002246 set_current_state(interruptible);
Milan Broz46125c12008-02-08 02:10:30 +00002247
2248 smp_mb();
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00002249 if (!md_in_flight(md))
Milan Broz46125c12008-02-08 02:10:30 +00002250 break;
2251
Mikulas Patocka401600d2009-04-02 19:55:38 +01002252 if (interruptible == TASK_INTERRUPTIBLE &&
2253 signal_pending(current)) {
Milan Broz46125c12008-02-08 02:10:30 +00002254 r = -EINTR;
2255 break;
2256 }
2257
2258 io_schedule();
2259 }
2260 set_current_state(TASK_RUNNING);
2261
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002262 remove_wait_queue(&md->wait, &wait);
2263
Milan Broz46125c12008-02-08 02:10:30 +00002264 return r;
2265}
2266
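/*
 * Wait for in-flight I/O to complete, then send an empty barrier bio
 * through the targets and wait for that to complete as well.
 */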
Mikulas Patocka531fe962009-06-22 10:12:17 +01002267static void dm_flush(struct mapped_device *md)
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002268{
2269 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patocka52b1fd52009-06-22 10:12:21 +01002270
2271 bio_init(&md->barrier_bio);
2272 md->barrier_bio.bi_bdev = md->bdev;
2273 md->barrier_bio.bi_rw = WRITE_BARRIER;
2274 __split_and_process_bio(md, &md->barrier_bio);
2275
2276 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002277}
2278
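/*
 * Handle a barrier bio taken off the deferred list: flush, process the
 * data part (if any), flush again, then complete the bio or put it back
 * on the deferred list if a target asked for a requeue.
 */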
2279static void process_barrier(struct mapped_device *md, struct bio *bio)
2280{
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002281 md->barrier_error = 0;
2282
Mikulas Patocka531fe962009-06-22 10:12:17 +01002283 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002284
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002285 if (!bio_empty_barrier(bio)) {
2286 __split_and_process_bio(md, bio);
2287 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002288 }
2289
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002290 if (md->barrier_error != DM_ENDIO_REQUEUE)
Mikulas Patocka531fe962009-06-22 10:12:17 +01002291 bio_endio(bio, md->barrier_error);
Mikulas Patocka2761e952009-06-22 10:12:18 +01002292 else {
2293 spin_lock_irq(&md->deferred_lock);
2294 bio_list_add_head(&md->deferred, bio);
2295 spin_unlock_irq(&md->deferred_lock);
2296 }
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002297}
2298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299/*
2300 * Process the deferred bios
2301 */
Mikulas Patockaef208582009-04-02 19:55:38 +01002302static void dm_wq_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303{
Mikulas Patockaef208582009-04-02 19:55:38 +01002304 struct mapped_device *md = container_of(work, struct mapped_device,
2305 work);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002306 struct bio *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Mikulas Patockaef208582009-04-02 19:55:38 +01002308 down_write(&md->io_lock);
2309
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002310 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002311 spin_lock_irq(&md->deferred_lock);
2312 c = bio_list_pop(&md->deferred);
2313 spin_unlock_irq(&md->deferred_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002314
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002315 if (!c) {
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002316 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002317 break;
2318 }
2319
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002320 up_write(&md->io_lock);
2321
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002322 if (dm_request_based(md))
2323 generic_make_request(c);
2324 else {
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02002325 if (c->bi_rw & REQ_HARDBARRIER)
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002326 process_barrier(md, c);
2327 else
2328 __split_and_process_bio(md, c);
2329 }
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002330
2331 down_write(&md->io_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002332 }
Milan Broz73d410c2008-02-08 02:10:25 +00002333
Mikulas Patockaef208582009-04-02 19:55:38 +01002334 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335}
2336
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002337static void dm_queue_flush(struct mapped_device *md)
Milan Broz304f3f62008-02-08 02:11:17 +00002338{
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002339 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2340 smp_mb__after_clear_bit();
Mikulas Patocka53d59142009-04-02 19:55:37 +01002341 queue_work(md->wq, &md->work);
Milan Broz304f3f62008-02-08 02:11:17 +00002342}
2343
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002344static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
2345{
2346 struct dm_rq_target_io *tio = clone->end_io_data;
2347
2348 tio->info.flush_request = flush_nr;
2349}
2350
2351/* Issue barrier requests to targets and wait for their completion. */
2352static int dm_rq_barrier(struct mapped_device *md)
2353{
2354 int i, j;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002355 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002356 unsigned num_targets = dm_table_get_num_targets(map);
2357 struct dm_target *ti;
2358 struct request *clone;
2359
2360 md->barrier_error = 0;
2361
2362 for (i = 0; i < num_targets; i++) {
2363 ti = dm_table_get_target(map, i);
2364 for (j = 0; j < ti->num_flush_requests; j++) {
2365 clone = clone_rq(md->flush_request, md, GFP_NOIO);
2366 dm_rq_set_flush_nr(clone, j);
2367 atomic_inc(&md->pending[rq_data_dir(clone)]);
2368 map_request(ti, clone, md);
2369 }
2370 }
2371
2372 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2373 dm_table_put(map);
2374
2375 return md->barrier_error;
2376}
2377
2378static void dm_rq_barrier_work(struct work_struct *work)
2379{
2380 int error;
2381 struct mapped_device *md = container_of(work, struct mapped_device,
2382 barrier_work);
2383 struct request_queue *q = md->queue;
2384 struct request *rq;
2385 unsigned long flags;
2386
2387 /*
2388	 * Hold the md reference here and release it only at the end so that
2389	 * the md can't be deleted by the device opener while the barrier request
2390 * completes.
2391 */
2392 dm_get(md);
2393
2394 error = dm_rq_barrier(md);
2395
2396 rq = md->flush_request;
2397 md->flush_request = NULL;
2398
2399 if (error == DM_ENDIO_REQUEUE) {
2400 spin_lock_irqsave(q->queue_lock, flags);
2401 blk_requeue_request(q, rq);
2402 spin_unlock_irqrestore(q->queue_lock, flags);
2403 } else
2404 blk_end_request_all(rq, error);
2405
2406 blk_run_queue(q);
2407
2408 dm_put(md);
2409}
2410
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411/*
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002412 * Swap in a new table, returning the old one for the caller to destroy.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 */
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002414struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002416 struct dm_table *map = ERR_PTR(-EINVAL);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002417 struct queue_limits limits;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002418 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
Daniel Walkere61290a2008-02-08 02:10:08 +00002420 mutex_lock(&md->suspend_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
2422 /* device must be suspended */
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002423 if (!dm_suspended_md(md))
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002424 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002426 r = dm_calculate_queue_limits(table, &limits);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002427 if (r) {
2428 map = ERR_PTR(r);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002429 goto out;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002430 }
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002431
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002432	 /* cannot change the device type once a table is bound */
2433 if (md->map &&
2434 (dm_table_get_type(md->map) != dm_table_get_type(table))) {
2435 DMWARN("can't change the device type after a table is bound");
2436 goto out;
2437 }
2438
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002439 map = __bind(md, table, &limits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002441out:
Daniel Walkere61290a2008-02-08 02:10:08 +00002442 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002443 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444}
2445
2446/*
2447 * Functions to lock and unlock any filesystem running on the
2448 * device.
2449 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002450static int lock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451{
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002452 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
2454 WARN_ON(md->frozen_sb);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002455
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002456 md->frozen_sb = freeze_bdev(md->bdev);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002457 if (IS_ERR(md->frozen_sb)) {
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002458 r = PTR_ERR(md->frozen_sb);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002459 md->frozen_sb = NULL;
2460 return r;
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002461 }
2462
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002463 set_bit(DMF_FROZEN, &md->flags);
2464
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 return 0;
2466}
2467
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002468static void unlock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469{
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002470 if (!test_bit(DMF_FROZEN, &md->flags))
2471 return;
2472
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002473 thaw_bdev(md->bdev, md->frozen_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 md->frozen_sb = NULL;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002475 clear_bit(DMF_FROZEN, &md->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476}
2477
2478/*
2479 * We need to be able to change a mapping table under a mounted
2480 * filesystem. For example we might want to move some data in
2481 * the background. Before the table can be swapped with
2482 * dm_bind_table, dm_suspend must be called to flush any in
2483 * flight bios and ensure that any further io gets deferred.
2484 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002485/*
2486 * Suspend mechanism in request-based dm.
2487 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002488 * 1. Flush all I/Os by lock_fs() if needed.
2489 * 2. Stop dispatching any I/O by stopping the request_queue.
2490 * 3. Wait for all in-flight I/Os to be completed or requeued.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002491 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002492 * To abort suspend, start the request_queue.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002493 */
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002494int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495{
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002496 struct dm_table *map = NULL;
Milan Broz46125c12008-02-08 02:10:30 +00002497 int r = 0;
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002498 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002499 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Daniel Walkere61290a2008-02-08 02:10:08 +00002501 mutex_lock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002502
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002503 if (dm_suspended_md(md)) {
Milan Broz73d410c2008-02-08 02:10:25 +00002504 r = -EINVAL;
Alasdair G Kergond2874832006-11-08 17:44:43 -08002505 goto out_unlock;
Milan Broz73d410c2008-02-08 02:10:25 +00002506 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002508 map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002510 /*
2511 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2512 * This flag is cleared before dm_suspend returns.
2513 */
2514 if (noflush)
2515 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2516
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002517 /* This does not get reverted if there's an error later. */
2518 dm_table_presuspend_targets(map);
2519
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002520 /*
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002521 * Flush I/O to the device.
2522 * Any I/O submitted after lock_fs() may not be flushed.
2523 * noflush takes precedence over do_lockfs.
2524 * (lock_fs() flushes I/Os and waits for them to complete.)
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002525 */
2526 if (!noflush && do_lockfs) {
2527 r = lock_fs(md);
2528 if (r)
Kiyoshi Uedaf431d962008-10-21 17:45:07 +01002529 goto out;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002530 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531
2532 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002533 * Here we must make sure that no processes are submitting requests
2534	 * to target drivers, i.e. no one may be executing
2535 * __split_and_process_bio. This is called from dm_request and
2536 * dm_wq_work.
2537 *
2538 * To get all processes out of __split_and_process_bio in dm_request,
2539 * we take the write lock. To prevent any process from reentering
2540 * __split_and_process_bio from dm_request, we set
2541 * DMF_QUEUE_IO_TO_THREAD.
2542 *
2543 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2544 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2545 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2546 * further calls to __split_and_process_bio from dm_wq_work.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002548 down_write(&md->io_lock);
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002549 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2550 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002551 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002553 /*
2554 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2555 * can be kicked until md->queue is stopped. So stop md->queue before
2556 * flushing md->wq.
2557 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002558 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002559 stop_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002560
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002561 flush_workqueue(md->wq);
2562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002564 * At this point no more requests are entering target request routines.
2565 * We call dm_wait_for_completion to wait for all existing requests
2566 * to finish.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 */
Mikulas Patocka401600d2009-04-02 19:55:38 +01002568 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002570 down_write(&md->io_lock);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002571 if (noflush)
Mikulas Patocka022c2612009-04-02 19:55:39 +01002572 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
Milan Broz94d63512008-02-08 02:10:27 +00002573 up_write(&md->io_lock);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002574
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 /* were we interrupted ? */
Milan Broz46125c12008-02-08 02:10:30 +00002576 if (r < 0) {
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002577 dm_queue_flush(md);
Milan Broz73d410c2008-02-08 02:10:25 +00002578
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002579 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002580 start_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002581
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002582 unlock_fs(md);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002583 goto out; /* pushback list is already flushed, so skip flush */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002584 }
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002585
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002586 /*
2587 * If dm_wait_for_completion returned 0, the device is completely
2588 * quiescent now. There is no request-processing activity. All new
2589 * requests are being added to md->deferred list.
2590 */
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 set_bit(DMF_SUSPENDED, &md->flags);
2593
Kiyoshi Ueda4d4471c2009-12-10 23:52:26 +00002594 dm_table_postsuspend_targets(map);
2595
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002596out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 dm_table_put(map);
Alasdair G Kergond2874832006-11-08 17:44:43 -08002598
2599out_unlock:
Daniel Walkere61290a2008-02-08 02:10:08 +00002600 mutex_unlock(&md->suspend_lock);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002601 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602}
2603
2604int dm_resume(struct mapped_device *md)
2605{
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002606 int r = -EINVAL;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002607 struct dm_table *map = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
Daniel Walkere61290a2008-02-08 02:10:08 +00002609 mutex_lock(&md->suspend_lock);
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002610 if (!dm_suspended_md(md))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002611 goto out;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002612
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002613 map = dm_get_live_table(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002614 if (!map || !dm_table_get_size(map))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002615 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616
Milan Broz8757b772006-10-03 01:15:36 -07002617 r = dm_table_resume_targets(map);
2618 if (r)
2619 goto out;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002620
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002621 dm_queue_flush(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002622
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002623 /*
2624 * Flushing deferred I/Os must be done after targets are resumed
2625	 * so that the mapping to targets can work correctly.
2626	 * Request-based dm queues the deferred I/Os in its request_queue.
2627 */
2628 if (dm_request_based(md))
2629 start_queue(md->queue);
2630
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002631 unlock_fs(md);
2632
2633 clear_bit(DMF_SUSPENDED, &md->flags);
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 dm_table_unplug_all(map);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002636 r = 0;
2637out:
2638 dm_table_put(map);
Daniel Walkere61290a2008-02-08 02:10:08 +00002639 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002640
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002641 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642}
2643
2644/*-----------------------------------------------------------------
2645 * Event notification.
2646 *---------------------------------------------------------------*/
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002647int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
Milan Broz60935eb2009-06-22 10:12:30 +01002648 unsigned cookie)
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002649{
Milan Broz60935eb2009-06-22 10:12:30 +01002650 char udev_cookie[DM_COOKIE_LENGTH];
2651 char *envp[] = { udev_cookie, NULL };
2652
2653 if (!cookie)
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002654 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
Milan Broz60935eb2009-06-22 10:12:30 +01002655 else {
2656 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2657 DM_COOKIE_ENV_VAR_NAME, cookie);
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002658 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2659 action, envp);
Milan Broz60935eb2009-06-22 10:12:30 +01002660 }
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002661}
2662
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002663uint32_t dm_next_uevent_seq(struct mapped_device *md)
2664{
2665 return atomic_add_return(1, &md->uevent_seq);
2666}
2667
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668uint32_t dm_get_event_nr(struct mapped_device *md)
2669{
2670 return atomic_read(&md->event_nr);
2671}
2672
2673int dm_wait_event(struct mapped_device *md, int event_nr)
2674{
2675 return wait_event_interruptible(md->eventq,
2676 (event_nr != atomic_read(&md->event_nr)));
2677}
2678
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002679void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2680{
2681 unsigned long flags;
2682
2683 spin_lock_irqsave(&md->uevent_lock, flags);
2684 list_add(elist, &md->uevent_list);
2685 spin_unlock_irqrestore(&md->uevent_lock, flags);
2686}
2687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688/*
2689 * The gendisk is only valid as long as you have a reference
2690 * count on 'md'.
2691 */
2692struct gendisk *dm_disk(struct mapped_device *md)
2693{
2694 return md->disk;
2695}
2696
Milan Broz784aae72009-01-06 03:05:12 +00002697struct kobject *dm_kobject(struct mapped_device *md)
2698{
2699 return &md->kobj;
2700}
2701
2702/*
2703	 * struct mapped_device should not be exported outside of dm.c,
2704	 * so use this check to verify that kobj is part of the md structure.
2705 */
2706struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2707{
2708 struct mapped_device *md;
2709
2710 md = container_of(kobj, struct mapped_device, kobj);
2711 if (&md->kobj != kobj)
2712 return NULL;
2713
Milan Broz4d89b7b2009-06-22 10:12:11 +01002714 if (test_bit(DMF_FREEING, &md->flags) ||
Mike Anderson432a2122009-12-10 23:52:20 +00002715 dm_deleting_md(md))
Milan Broz4d89b7b2009-06-22 10:12:11 +01002716 return NULL;
2717
Milan Broz784aae72009-01-06 03:05:12 +00002718 dm_get(md);
2719 return md;
2720}
2721
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002722int dm_suspended_md(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723{
2724 return test_bit(DMF_SUSPENDED, &md->flags);
2725}
2726
Kiyoshi Ueda64dbce52009-12-10 23:52:27 +00002727int dm_suspended(struct dm_target *ti)
2728{
Kiyoshi Uedaecdb2e22010-03-06 02:29:52 +00002729 return dm_suspended_md(dm_table_get_md(ti->table));
Kiyoshi Ueda64dbce52009-12-10 23:52:27 +00002730}
2731EXPORT_SYMBOL_GPL(dm_suspended);
2732
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002733int dm_noflush_suspending(struct dm_target *ti)
2734{
Kiyoshi Uedaecdb2e22010-03-06 02:29:52 +00002735 return __noflush_suspending(dm_table_get_md(ti->table));
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002736}
2737EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2738
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002739struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2740{
2741 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2742
2743 if (!pools)
2744 return NULL;
2745
2746 pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2747 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2748 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2749 if (!pools->io_pool)
2750 goto free_pools_and_out;
2751
2752 pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2753 mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2754 mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2755 if (!pools->tio_pool)
2756 goto free_io_pool_and_out;
2757
2758 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2759 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2760 if (!pools->bs)
2761 goto free_tio_pool_and_out;
2762
2763 return pools;
2764
2765free_tio_pool_and_out:
2766 mempool_destroy(pools->tio_pool);
2767
2768free_io_pool_and_out:
2769 mempool_destroy(pools->io_pool);
2770
2771free_pools_and_out:
2772 kfree(pools);
2773
2774 return NULL;
2775}
2776
2777void dm_free_md_mempools(struct dm_md_mempools *pools)
2778{
2779 if (!pools)
2780 return;
2781
2782 if (pools->io_pool)
2783 mempool_destroy(pools->io_pool);
2784
2785 if (pools->tio_pool)
2786 mempool_destroy(pools->tio_pool);
2787
2788 if (pools->bs)
2789 bioset_free(pools->bs);
2790
2791 kfree(pools);
2792}
2793
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07002794static const struct block_device_operations dm_blk_dops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 .open = dm_blk_open,
2796 .release = dm_blk_close,
Milan Brozaa129a22006-10-03 01:15:15 -07002797 .ioctl = dm_blk_ioctl,
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002798 .getgeo = dm_blk_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 .owner = THIS_MODULE
2800};
2801
2802EXPORT_SYMBOL(dm_get_mapinfo);
2803
2804/*
2805 * module hooks
2806 */
2807module_init(dm_init);
2808module_exit(dm_exit);
2809
2810module_param(major, uint, 0);
2811MODULE_PARM_DESC(major, "The major number of the device mapper");
2812MODULE_DESCRIPTION(DM_NAME " driver");
2813MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2814MODULE_LICENSE("GPL");