/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

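/*
 * Total number of bios/requests currently in flight for this device.
 */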
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

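/*
 * Per-bio disk statistics and in-flight accounting.
 */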
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 *   -EOPNOTSUPP supersedes any I/O error.
	 *   Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = blk_barrier_rq(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (blk_pc_request(rq) && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(blk_barrier_rq(clone))) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

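/*
 * Finish a completed clone: complete the original request, leave it to
 * the target, or requeue it, depending on the target's rq_end_io
 * return value.
 */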
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(blk_barrier_rq(clone))) {
		/*
		 * Barrier clones share an original request.  So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(blk_barrier_rq(clone))) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

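/*
 * Hand a clone to its target's map function and dispatch it, or clean
 * up the clone if the target returned an error or asked for a requeue.
 */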
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned flush_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
		     flush_nr++)
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;

	return 0;
}

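/*
 * Clone and map as much of the bio as the target at the current sector
 * can take: the whole remainder if it fits, as many complete bvecs as
 * possible otherwise, or a split bvec when one bvec spans targets.
 */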
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci, ti);
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

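/*
 * Tell the block layer how many bytes can be merged into a bio at this
 * offset without device-mapper having to split it later.
 */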
Milan Brozf6fccb12008-07-21 12:00:37 +01001338static int dm_merge_bvec(struct request_queue *q,
1339 struct bvec_merge_data *bvm,
1340 struct bio_vec *biovec)
1341{
1342 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001343 struct dm_table *map = dm_get_live_table(md);
Milan Brozf6fccb12008-07-21 12:00:37 +01001344 struct dm_target *ti;
1345 sector_t max_sectors;
Mikulas Patocka50371082008-10-01 14:39:17 +01001346 int max_size = 0;
Milan Brozf6fccb12008-07-21 12:00:37 +01001347
1348 if (unlikely(!map))
Mikulas Patocka50371082008-10-01 14:39:17 +01001349 goto out;
Milan Brozf6fccb12008-07-21 12:00:37 +01001350
1351 ti = dm_table_find_target(map, bvm->bi_sector);
Mikulas Patockab01cd5a2008-10-01 14:39:24 +01001352 if (!dm_target_is_valid(ti))
1353 goto out_table;
Milan Brozf6fccb12008-07-21 12:00:37 +01001354
1355 /*
1356 * Find maximum amount of I/O that won't need splitting
1357 */
1358 max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
1359 (sector_t) BIO_MAX_SECTORS);
1360 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1361 if (max_size < 0)
1362 max_size = 0;
1363
1364 /*
1365 * merge_bvec_fn() returns number of bytes
1366 * it can accept at this offset
1367 * max is precomputed maximal io size
1368 */
1369 if (max_size && ti->type->merge)
1370 max_size = ti->type->merge(ti, bvm, biovec, max_size);
Mikulas Patocka8cbeb672009-06-22 10:12:14 +01001371 /*
1372 * If the target doesn't support merge method and some of the devices
1373 * provided their merge_bvec method (we know this by looking at
1374 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1375 * entries. So always set max_size to 0, and the code below allows
1376 * just one page.
1377 */
1378 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1379
1380 max_size = 0;
Milan Brozf6fccb12008-07-21 12:00:37 +01001381
Mikulas Patockab01cd5a2008-10-01 14:39:24 +01001382out_table:
Mikulas Patocka50371082008-10-01 14:39:17 +01001383 dm_table_put(map);
1384
1385out:
Milan Brozf6fccb12008-07-21 12:00:37 +01001386 /*
1387 * Always allow an entire first page
1388 */
1389 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1390 max_size = biovec->bv_len;
1391
Milan Brozf6fccb12008-07-21 12:00:37 +01001392 return max_size;
1393}
1394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395/*
1396 * The request function that just remaps the bio built up by
1397 * dm_merge_bvec.
1398 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001399static int _dm_request(struct request_queue *q, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400{
Kevin Corry12f03a42006-02-01 03:04:52 -08001401 int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 struct mapped_device *md = q->queuedata;
Tejun Heoc9959052008-08-25 19:47:21 +09001403 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001405 down_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406
Tejun Heo074a7ac2008-08-25 19:56:14 +09001407 cpu = part_stat_lock();
1408 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1409 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1410 part_stat_unlock();
Kevin Corry12f03a42006-02-01 03:04:52 -08001411
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 /*
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001413 * If we're suspended or the thread is processing barriers
1414 * we have to queue this io for later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01001416 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
Jens Axboe1f98a132009-09-11 14:32:04 +02001417 unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001418 up_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
Alasdair G Kergon54d9a1b2009-04-09 00:27:14 +01001420 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
1421 bio_rw(bio) == READA) {
1422 bio_io_error(bio);
1423 return 0;
1424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
Mikulas Patocka92c63902009-04-09 00:27:15 +01001426 queue_io(md, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
Mikulas Patocka92c63902009-04-09 00:27:15 +01001428 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001431 __split_and_process_bio(md, bio);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001432 up_read(&md->io_lock);
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001433 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434}
1435
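/*
 * For request-based dm: pass the bio straight to the block layer's
 * saved make_request function so it is turned into a request on
 * md->queue and later dispatched by dm_request_fn().
 */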
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001436static int dm_make_request(struct request_queue *q, struct bio *bio)
1437{
1438 struct mapped_device *md = q->queuedata;
1439
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001440 return md->saved_make_request_fn(q, bio); /* call __make_request() */
1441}
1442
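/* A mapped device is request-based iff its queue is marked stackable. */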
1443static int dm_request_based(struct mapped_device *md)
1444{
1445 return blk_queue_stackable(md->queue);
1446}
1447
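/*
 * make_request entry point: route the bio to the request-based or
 * bio-based path depending on how this mapped device is configured.
 */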
1448static int dm_request(struct request_queue *q, struct bio *bio)
1449{
1450 struct mapped_device *md = q->queuedata;
1451
1452 if (dm_request_based(md))
1453 return dm_make_request(q, bio);
1454
1455 return _dm_request(q, bio);
1456}
1457
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001458/*
1459	 * Mark this request as a flush request, so that dm_request_fn() can
1460	 * recognize it.
1461 */
1462static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
1463{
1464 rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
1465 rq->cmd[0] = REQ_LB_OP_FLUSH;
1466}
1467
1468static bool dm_rq_is_flush_request(struct request *rq)
1469{
1470	return rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
1471	       rq->cmd[0] == REQ_LB_OP_FLUSH;
1475}
1476
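/*
 * Insert a clone request into the queue it has been mapped to.
 * On failure the clone is completed immediately with the error.
 */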
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001477void dm_dispatch_request(struct request *rq)
1478{
1479 int r;
1480
1481 if (blk_queue_io_stat(rq->q))
1482 rq->cmd_flags |= REQ_IO_STAT;
1483
1484 rq->start_time = jiffies;
1485 r = blk_insert_cloned_request(rq->q, rq);
1486 if (r)
1487 dm_complete_request(rq, r);
1488}
1489EXPORT_SYMBOL_GPL(dm_dispatch_request);
1490
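/* Free the per-bio clone info and return the clone bio to md's bioset. */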
1491static void dm_rq_bio_destructor(struct bio *bio)
1492{
1493 struct dm_rq_clone_bio_info *info = bio->bi_private;
1494 struct mapped_device *md = info->tio->md;
1495
1496 free_bio_info(info);
1497 bio_free(bio, md->bs);
1498}
1499
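/*
 * Called for each bio cloned by blk_rq_prep_clone(): attach the per-bio
 * clone info and the completion hooks used by request-based dm.
 */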
1500static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1501 void *data)
1502{
1503 struct dm_rq_target_io *tio = data;
1504 struct mapped_device *md = tio->md;
1505 struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1506
1507 if (!info)
1508 return -ENOMEM;
1509
1510 info->orig = bio_orig;
1511 info->tio = tio;
1512 bio->bi_end_io = end_clone_bio;
1513 bio->bi_private = info;
1514 bio->bi_destructor = dm_rq_bio_destructor;
1515
1516 return 0;
1517}
1518
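/*
 * Set up the clone of an original request.  Flush requests get a fresh
 * barrier request; anything else has its bios cloned from the original
 * via blk_rq_prep_clone().
 */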
1519static int setup_clone(struct request *clone, struct request *rq,
1520 struct dm_rq_target_io *tio)
1521{
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001522 int r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001523
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001524 if (dm_rq_is_flush_request(rq)) {
1525 blk_rq_init(NULL, clone);
1526 clone->cmd_type = REQ_TYPE_FS;
1527 clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
1528 } else {
1529 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1530 dm_rq_bio_constructor, tio);
1531 if (r)
1532 return r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001533
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001534 clone->cmd = rq->cmd;
1535 clone->cmd_len = rq->cmd_len;
1536 clone->sense = rq->sense;
1537 clone->buffer = rq->buffer;
1538 }
1539
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001540 clone->end_io = end_clone_request;
1541 clone->end_io_data = tio;
1542
1543 return 0;
1544}
1545
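/*
 * Allocate a request-based target io and build the clone request
 * embedded in it.  Returns NULL on allocation failure.
 */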
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001546static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1547 gfp_t gfp_mask)
1548{
1549 struct request *clone;
1550 struct dm_rq_target_io *tio;
1551
1552 tio = alloc_rq_tio(md, gfp_mask);
1553 if (!tio)
1554 return NULL;
1555
1556 tio->md = md;
1557 tio->ti = NULL;
1558 tio->orig = rq;
1559 tio->error = 0;
1560 memset(&tio->info, 0, sizeof(tio->info));
1561
1562 clone = &tio->clone;
1563 if (setup_clone(clone, rq, tio)) {
1564 /* -ENOMEM */
1565 free_rq_tio(tio);
1566 return NULL;
1567 }
1568
1569 return clone;
1570}
1571
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001572/*
1573 * Called with the queue lock held.
1574 */
1575static int dm_prep_fn(struct request_queue *q, struct request *rq)
1576{
1577 struct mapped_device *md = q->queuedata;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001578 struct request *clone;
1579
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001580 if (unlikely(dm_rq_is_flush_request(rq)))
1581 return BLKPREP_OK;
1582
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001583 if (unlikely(rq->special)) {
1584 DMWARN("Already has something in rq->special.");
1585 return BLKPREP_KILL;
1586 }
1587
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001588 clone = clone_rq(rq, md, GFP_ATOMIC);
1589 if (!clone)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001590 return BLKPREP_DEFER;
1591
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001592 rq->special = clone;
1593 rq->cmd_flags |= REQ_DONTPREP;
1594
1595 return BLKPREP_OK;
1596}
1597
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001598/*
1599 * Returns:
1600 * 0 : the request has been processed (not requeued)
1601 * !0 : the request has been requeued
1602 */
1603static int map_request(struct dm_target *ti, struct request *clone,
1604 struct mapped_device *md)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001605{
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001606 int r, requeued = 0;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001607 struct dm_rq_target_io *tio = clone->end_io_data;
1608
1609 /*
1610 * Hold the md reference here for the in-flight I/O.
1611	 * We can't rely on the reference count taken by the device opener,
1612	 * because the device may be closed during request completion,
1613	 * once all of its bios have completed.
1614 * See the comment in rq_completed() too.
1615 */
1616 dm_get(md);
1617
1618 tio->ti = ti;
1619 r = ti->type->map_rq(ti, clone, &tio->info);
1620 switch (r) {
1621 case DM_MAPIO_SUBMITTED:
1622	 /* The target has taken the I/O and will submit it by itself later */
1623 break;
1624 case DM_MAPIO_REMAPPED:
1625 /* The target has remapped the I/O so dispatch it */
Jun'ichi Nomura6db4ccd2009-12-10 23:52:25 +00001626 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1627 blk_rq_pos(tio->orig));
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001628 dm_dispatch_request(clone);
1629 break;
1630 case DM_MAPIO_REQUEUE:
1631 /* The target wants to requeue the I/O */
1632 dm_requeue_unmapped_request(clone);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001633 requeued = 1;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001634 break;
1635 default:
1636 if (r > 0) {
1637 DMWARN("unimplemented target map return value: %d", r);
1638 BUG();
1639 }
1640
1641 /* The target wants to complete the I/O */
1642 dm_kill_unmapped_request(clone, r);
1643 break;
1644 }
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001645
1646 return requeued;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001647}
1648
1649/*
1650 * q->request_fn for request-based dm.
1651 * Called with the queue lock held.
1652 */
1653static void dm_request_fn(struct request_queue *q)
1654{
1655 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001656 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001657 struct dm_target *ti;
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001658 struct request *rq, *clone;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001659
1660 /*
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001661 * For suspend, check blk_queue_stopped() and increment
1662	 * ->pending within a single queue_lock, so that we do not increment
1663	 * the number of in-flight I/Os after the queue is stopped in
1664 * dm_suspend().
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001665 */
1666 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1667 rq = blk_peek_request(q);
1668 if (!rq)
1669 goto plug_and_out;
1670
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001671 if (unlikely(dm_rq_is_flush_request(rq))) {
1672 BUG_ON(md->flush_request);
1673 md->flush_request = rq;
1674 blk_start_request(rq);
1675 queue_work(md->wq, &md->barrier_work);
1676 goto out;
1677 }
1678
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001679 ti = dm_table_find_target(map, blk_rq_pos(rq));
1680 if (ti->type->busy && ti->type->busy(ti))
1681 goto plug_and_out;
1682
1683 blk_start_request(rq);
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001684 clone = rq->special;
1685 atomic_inc(&md->pending[rq_data_dir(clone)]);
1686
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001687 spin_unlock(q->queue_lock);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001688 if (map_request(ti, clone, md))
1689 goto requeued;
1690
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001691 spin_lock_irq(q->queue_lock);
1692 }
1693
1694 goto out;
1695
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001696requeued:
1697 spin_lock_irq(q->queue_lock);
1698
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001699plug_and_out:
1700 if (!elv_queue_empty(q))
1701 /* Some requests still remain, retry later */
1702 blk_plug_device(q);
1703
1704out:
1705 dm_table_put(map);
1706
1707 return;
1708}
1709
1710int dm_underlying_device_busy(struct request_queue *q)
1711{
1712 return blk_lld_busy(q);
1713}
1714EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1715
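/*
 * lld_busy callback: report this device as busy while it has no live
 * table, is suspending, or any target in the live table reports itself
 * busy.
 */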
1716static int dm_lld_busy(struct request_queue *q)
1717{
1718 int r;
1719 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001720 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001721
1722 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1723 r = 1;
1724 else
1725 r = dm_table_any_busy_target(map);
1726
1727 dm_table_put(map);
1728
1729 return r;
1730}
1731
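/*
 * Unplug the devices in the live table (and, for request-based dm,
 * this queue itself) so that any held-back I/O is issued.
 */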
Jens Axboe165125e2007-07-24 09:28:11 +02001732static void dm_unplug_all(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733{
1734 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001735 struct dm_table *map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
1737 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001738 if (dm_request_based(md))
1739 generic_unplug_device(q);
1740
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 dm_table_unplug_all(map);
1742 dm_table_put(map);
1743 }
1744}
1745
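/*
 * backing_dev_info congestion callback: unless we are suspending,
 * report the congestion state of the live table, or of md->queue
 * itself for request-based devices.
 */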
1746static int dm_any_congested(void *congested_data, int bdi_bits)
1747{
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001748 int r = bdi_bits;
1749 struct mapped_device *md = congested_data;
1750 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001752 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001753 map = dm_get_live_table(md);
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001754 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001755 /*
1756	 * Request-based dm cares only about its own queue when queried
1757	 * about the congestion status of the request_queue.
1758 */
1759 if (dm_request_based(md))
1760 r = md->queue->backing_dev_info.state &
1761 bdi_bits;
1762 else
1763 r = dm_table_any_congested(map, bdi_bits);
1764
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001765 dm_table_put(map);
1766 }
1767 }
1768
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 return r;
1770}
1771
1772/*-----------------------------------------------------------------
1773 * An IDR is used to keep track of allocated minor numbers.
1774 *---------------------------------------------------------------*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775static DEFINE_IDR(_minor_idr);
1776
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001777static void free_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778{
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001779 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 idr_remove(&_minor_idr, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001781 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782}
1783
1784/*
1785 * See if the device with a specific minor # is free.
1786 */
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001787static int specific_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788{
1789 int r, m;
1790
1791 if (minor >= (1 << MINORBITS))
1792 return -EINVAL;
1793
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001794 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1795 if (!r)
1796 return -ENOMEM;
1797
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001798 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
1800 if (idr_find(&_minor_idr, minor)) {
1801 r = -EBUSY;
1802 goto out;
1803 }
1804
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001805 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001806 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 if (m != minor) {
1810 idr_remove(&_minor_idr, m);
1811 r = -EBUSY;
1812 goto out;
1813 }
1814
1815out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001816 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 return r;
1818}
1819
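/*
 * Allocate the next unused minor number.
 */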
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001820static int next_free_minor(int *minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001822 int r, m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001825 if (!r)
1826 return -ENOMEM;
1827
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001828 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001830 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001831 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
1834 if (m >= (1 << MINORBITS)) {
1835 idr_remove(&_minor_idr, m);
1836 r = -ENOSPC;
1837 goto out;
1838 }
1839
1840 *minor = m;
1841
1842out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001843 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 return r;
1845}
1846
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001847static const struct block_device_operations dm_blk_dops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Mikulas Patocka53d59142009-04-02 19:55:37 +01001849static void dm_wq_work(struct work_struct *work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001850static void dm_rq_barrier_work(struct work_struct *work);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852/*
1853 * Allocate and initialise a blank device with a given minor.
1854 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001855static struct mapped_device *alloc_dev(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856{
1857 int r;
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001858 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001859 void *old_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
1861 if (!md) {
1862 DMWARN("unable to allocate device, out of memory.");
1863 return NULL;
1864 }
1865
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001866 if (!try_module_get(THIS_MODULE))
Milan Broz6ed7ade2008-02-08 02:10:19 +00001867 goto bad_module_get;
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 /* get a minor number for the dev */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001870 if (minor == DM_ANY_MINOR)
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001871 r = next_free_minor(&minor);
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001872 else
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001873 r = specific_minor(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 if (r < 0)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001875 goto bad_minor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001877 init_rwsem(&md->io_lock);
Daniel Walkere61290a2008-02-08 02:10:08 +00001878 mutex_init(&md->suspend_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001879 spin_lock_init(&md->deferred_lock);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001880 spin_lock_init(&md->barrier_error_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 rwlock_init(&md->map_lock);
1882 atomic_set(&md->holders, 1);
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -07001883 atomic_set(&md->open_count, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 atomic_set(&md->event_nr, 0);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001885 atomic_set(&md->uevent_seq, 0);
1886 INIT_LIST_HEAD(&md->uevent_list);
1887 spin_lock_init(&md->uevent_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001889 md->queue = blk_init_queue(dm_request_fn, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 if (!md->queue)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001891 goto bad_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001893 /*
1894 * Request-based dm devices cannot be stacked on top of bio-based dm
1895 * devices. The type of this dm device has not been decided yet,
1896 * although we initialized the queue using blk_init_queue().
1897 * The type is decided at the first table loading time.
1898 * To prevent problematic device stacking, clear the queue flag
1899 * for request stacking support until then.
1900 *
1901 * This queue is new, so no concurrency on the queue_flags.
1902 */
1903 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1904 md->saved_make_request_fn = md->queue->make_request_fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 md->queue->queuedata = md;
1906 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1907 md->queue->backing_dev_info.congested_data = md;
1908 blk_queue_make_request(md->queue, dm_request);
Jens Axboedaef2652006-01-10 10:48:02 +01001909 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 md->queue->unplug_fn = dm_unplug_all;
Milan Brozf6fccb12008-07-21 12:00:37 +01001911 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001912 blk_queue_softirq_done(md->queue, dm_softirq_done);
1913 blk_queue_prep_rq(md->queue, dm_prep_fn);
1914 blk_queue_lld_busy(md->queue, dm_lld_busy);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001915 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
1916 dm_rq_prepare_flush);
Stefan Bader9faf4002006-10-03 01:15:41 -07001917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 md->disk = alloc_disk(1);
1919 if (!md->disk)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001920 goto bad_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02001922 atomic_set(&md->pending[0], 0);
1923 atomic_set(&md->pending[1], 0);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001924 init_waitqueue_head(&md->wait);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001925 INIT_WORK(&md->work, dm_wq_work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001926 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001927 init_waitqueue_head(&md->eventq);
1928
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 md->disk->major = _major;
1930 md->disk->first_minor = minor;
1931 md->disk->fops = &dm_blk_dops;
1932 md->disk->queue = md->queue;
1933 md->disk->private_data = md;
1934 sprintf(md->disk->disk_name, "dm-%d", minor);
1935 add_disk(md->disk);
Mike Anderson7e51f252006-03-27 01:17:52 -08001936 format_dev_t(md->name, MKDEV(_major, minor));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
Milan Broz304f3f62008-02-08 02:11:17 +00001938 md->wq = create_singlethread_workqueue("kdmflush");
1939 if (!md->wq)
1940 goto bad_thread;
1941
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001942 md->bdev = bdget_disk(md->disk, 0);
1943 if (!md->bdev)
1944 goto bad_bdev;
1945
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001946 /* Populate the mapping, nobody knows we exist yet */
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001947 spin_lock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001948 old_md = idr_replace(&_minor_idr, md, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001949 spin_unlock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001950
1951 BUG_ON(old_md != MINOR_ALLOCED);
1952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return md;
1954
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001955bad_bdev:
1956 destroy_workqueue(md->wq);
Milan Broz304f3f62008-02-08 02:11:17 +00001957bad_thread:
Zdenek Kabelac03022c52009-10-16 23:18:15 +01001958 del_gendisk(md->disk);
Milan Broz304f3f62008-02-08 02:11:17 +00001959 put_disk(md->disk);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001960bad_disk:
Al Viro1312f402006-03-12 11:02:03 -05001961 blk_cleanup_queue(md->queue);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001962bad_queue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 free_minor(minor);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001964bad_minor:
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001965 module_put(THIS_MODULE);
Milan Broz6ed7ade2008-02-08 02:10:19 +00001966bad_module_get:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 kfree(md);
1968 return NULL;
1969}
1970
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01001971static void unlock_fs(struct mapped_device *md);
1972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973static void free_dev(struct mapped_device *md)
1974{
Tejun Heof331c022008-09-03 09:01:48 +02001975 int minor = MINOR(disk_devt(md->disk));
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001976
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001977 unlock_fs(md);
1978 bdput(md->bdev);
Milan Broz304f3f62008-02-08 02:11:17 +00001979 destroy_workqueue(md->wq);
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01001980 if (md->tio_pool)
1981 mempool_destroy(md->tio_pool);
1982 if (md->io_pool)
1983 mempool_destroy(md->io_pool);
1984 if (md->bs)
1985 bioset_free(md->bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01001986 blk_integrity_unregister(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 del_gendisk(md->disk);
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08001988 free_minor(minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07001989
1990 spin_lock(&_minor_lock);
1991 md->disk->private_data = NULL;
1992 spin_unlock(&_minor_lock);
1993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 put_disk(md->disk);
Al Viro1312f402006-03-12 11:02:03 -05001995 blk_cleanup_queue(md->queue);
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001996 module_put(THIS_MODULE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 kfree(md);
1998}
1999
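/*
 * Take ownership of the mempools set up for the table being bound,
 * unless this mapped device already has its own.
 */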
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002000static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2001{
2002 struct dm_md_mempools *p;
2003
2004 if (md->io_pool && md->tio_pool && md->bs)
2005	 /* the md already has the necessary mempools */
2006 goto out;
2007
2008 p = dm_table_get_md_mempools(t);
2009 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2010
2011 md->io_pool = p->io_pool;
2012 p->io_pool = NULL;
2013 md->tio_pool = p->tio_pool;
2014 p->tio_pool = NULL;
2015 md->bs = p->bs;
2016 p->bs = NULL;
2017
2018out:
2019	 /* mempool binding is complete; the table no longer needs its mempools */
2020 dm_table_free_md_mempools(t);
2021}
2022
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023/*
2024 * Bind a table to the device.
2025 */
2026static void event_callback(void *context)
2027{
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002028 unsigned long flags;
2029 LIST_HEAD(uevents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 struct mapped_device *md = (struct mapped_device *) context;
2031
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002032 spin_lock_irqsave(&md->uevent_lock, flags);
2033 list_splice_init(&md->uevent_list, &uevents);
2034 spin_unlock_irqrestore(&md->uevent_lock, flags);
2035
Tejun Heoed9e1982008-08-25 19:56:05 +09002036 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002037
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 atomic_inc(&md->event_nr);
2039 wake_up(&md->eventq);
2040}
2041
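/* Update both the gendisk capacity and the backing bdev inode size. */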
Alasdair G Kergon4e901882005-07-28 21:15:59 -07002042static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043{
Alasdair G Kergon4e901882005-07-28 21:15:59 -07002044 set_capacity(md->disk, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002046 mutex_lock(&md->bdev->bd_inode->i_mutex);
2047 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2048 mutex_unlock(&md->bdev->bd_inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049}
2050
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002051/*
2052 * Returns old map, which caller must destroy.
2053 */
2054static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2055 struct queue_limits *limits)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002057 struct dm_table *old_map;
Jens Axboe165125e2007-07-24 09:28:11 +02002058 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 sector_t size;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002060 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
2062 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002063
2064 /*
2065 * Wipe any geometry if the size of the table changed.
2066 */
2067 if (size != get_capacity(md->disk))
2068 memset(&md->geometry, 0, sizeof(md->geometry));
2069
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002070 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002072 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002073
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002074 /*
2075	 * If the old table type wasn't request-based, the queue hasn't been
2076	 * stopped yet during suspension, so stop it now to prevent I/O from
2077	 * being mapped before resume.
2078	 * This must be done before setting the queue restrictions,
2079	 * because request-based dm may start running right after they are set.
2080 */
2081 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2082 stop_queue(q);
2083
2084 __bind_mempools(md, t);
2085
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002086 write_lock_irqsave(&md->map_lock, flags);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002087 old_map = md->map;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002088 md->map = t;
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002089 dm_table_set_restrictions(t, q, limits);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002090 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002091
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002092 return old_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093}
2094
Alasdair G Kergona7940152009-12-10 23:52:23 +00002095/*
2096 * Returns unbound table for the caller to free.
2097 */
2098static struct dm_table *__unbind(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099{
2100 struct dm_table *map = md->map;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002101 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 if (!map)
Alasdair G Kergona7940152009-12-10 23:52:23 +00002104 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
2106 dm_table_event_callback(map, NULL, NULL);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002107 write_lock_irqsave(&md->map_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 md->map = NULL;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002109 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergona7940152009-12-10 23:52:23 +00002110
2111 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113
2114/*
2115 * Constructor for a new device.
2116 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002117int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118{
2119 struct mapped_device *md;
2120
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002121 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 if (!md)
2123 return -ENXIO;
2124
Milan Broz784aae72009-01-06 03:05:12 +00002125 dm_sysfs_init(md);
2126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 *result = md;
2128 return 0;
2129}
2130
David Teigland637842c2006-01-06 00:20:00 -08002131static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132{
2133 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 unsigned minor = MINOR(dev);
2135
2136 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2137 return NULL;
2138
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002139 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002142 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02002143 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07002144 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08002145 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002146 goto out;
2147 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002149out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002150 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
David Teigland637842c2006-01-06 00:20:00 -08002152 return md;
2153}
2154
David Teiglandd229a952006-01-06 00:20:01 -08002155struct mapped_device *dm_get_md(dev_t dev)
2156{
2157 struct mapped_device *md = dm_find_md(dev);
2158
2159 if (md)
2160 dm_get(md);
2161
2162 return md;
2163}
2164
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002165void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08002166{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002167 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168}
2169
2170void dm_set_mdptr(struct mapped_device *md, void *ptr)
2171{
2172 md->interface_ptr = ptr;
2173}
2174
2175void dm_get(struct mapped_device *md)
2176{
2177 atomic_inc(&md->holders);
2178}
2179
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002180const char *dm_device_name(struct mapped_device *md)
2181{
2182 return md->name;
2183}
2184EXPORT_SYMBOL_GPL(dm_device_name);
2185
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186void dm_put(struct mapped_device *md)
2187{
Mike Anderson1134e5a2006-03-27 01:17:54 -08002188 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002190 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2191
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002192 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002193 map = dm_get_live_table(md);
Tejun Heof331c022008-09-03 09:01:48 +02002194 idr_replace(&_minor_idr, MINOR_ALLOCED,
2195 MINOR(disk_devt(dm_disk(md))));
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002196 set_bit(DMF_FREEING, &md->flags);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002197 spin_unlock(&_minor_lock);
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002198 if (!dm_suspended_md(md)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 dm_table_presuspend_targets(map);
2200 dm_table_postsuspend_targets(map);
2201 }
Milan Broz784aae72009-01-06 03:05:12 +00002202 dm_sysfs_exit(md);
Mike Anderson1134e5a2006-03-27 01:17:54 -08002203 dm_table_put(map);
Alasdair G Kergona7940152009-12-10 23:52:23 +00002204 dm_table_destroy(__unbind(md));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 free_dev(md);
2206 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
Edward Goggin79eb8852007-05-09 02:32:56 -07002208EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
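/*
 * Wait until there is no in-flight I/O on this device.  Returns -EINTR
 * if called interruptibly and a signal arrives first.
 */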
Mikulas Patocka401600d2009-04-02 19:55:38 +01002210static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
Milan Broz46125c12008-02-08 02:10:30 +00002211{
2212 int r = 0;
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002213 DECLARE_WAITQUEUE(wait, current);
2214
2215 dm_unplug_all(md->queue);
2216
2217 add_wait_queue(&md->wait, &wait);
Milan Broz46125c12008-02-08 02:10:30 +00002218
2219 while (1) {
Mikulas Patocka401600d2009-04-02 19:55:38 +01002220 set_current_state(interruptible);
Milan Broz46125c12008-02-08 02:10:30 +00002221
2222 smp_mb();
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00002223 if (!md_in_flight(md))
Milan Broz46125c12008-02-08 02:10:30 +00002224 break;
2225
Mikulas Patocka401600d2009-04-02 19:55:38 +01002226 if (interruptible == TASK_INTERRUPTIBLE &&
2227 signal_pending(current)) {
Milan Broz46125c12008-02-08 02:10:30 +00002228 r = -EINTR;
2229 break;
2230 }
2231
2232 io_schedule();
2233 }
2234 set_current_state(TASK_RUNNING);
2235
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002236 remove_wait_queue(&md->wait, &wait);
2237
Milan Broz46125c12008-02-08 02:10:30 +00002238 return r;
2239}
2240
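/*
 * For bio-based barriers: wait for in-flight I/O to drain, then send an
 * empty barrier bio through the targets and wait for it to complete.
 */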
Mikulas Patocka531fe962009-06-22 10:12:17 +01002241static void dm_flush(struct mapped_device *md)
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002242{
2243 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patocka52b1fd52009-06-22 10:12:21 +01002244
2245 bio_init(&md->barrier_bio);
2246 md->barrier_bio.bi_bdev = md->bdev;
2247 md->barrier_bio.bi_rw = WRITE_BARRIER;
2248 __split_and_process_bio(md, &md->barrier_bio);
2249
2250 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002251}
2252
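/*
 * Handle a barrier bio taken off the deferred list: flush, process the
 * payload (if any) between flushes, then complete the bio or push it
 * back for requeueing depending on md->barrier_error.
 */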
2253static void process_barrier(struct mapped_device *md, struct bio *bio)
2254{
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002255 md->barrier_error = 0;
2256
Mikulas Patocka531fe962009-06-22 10:12:17 +01002257 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002258
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002259 if (!bio_empty_barrier(bio)) {
2260 __split_and_process_bio(md, bio);
2261 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002262 }
2263
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002264 if (md->barrier_error != DM_ENDIO_REQUEUE)
Mikulas Patocka531fe962009-06-22 10:12:17 +01002265 bio_endio(bio, md->barrier_error);
Mikulas Patocka2761e952009-06-22 10:12:18 +01002266 else {
2267 spin_lock_irq(&md->deferred_lock);
2268 bio_list_add_head(&md->deferred, bio);
2269 spin_unlock_irq(&md->deferred_lock);
2270 }
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002271}
2272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273/*
2274 * Process the deferred bios
2275 */
Mikulas Patockaef208582009-04-02 19:55:38 +01002276static void dm_wq_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277{
Mikulas Patockaef208582009-04-02 19:55:38 +01002278 struct mapped_device *md = container_of(work, struct mapped_device,
2279 work);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002280 struct bio *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
Mikulas Patockaef208582009-04-02 19:55:38 +01002282 down_write(&md->io_lock);
2283
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002284 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002285 spin_lock_irq(&md->deferred_lock);
2286 c = bio_list_pop(&md->deferred);
2287 spin_unlock_irq(&md->deferred_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002288
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002289 if (!c) {
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002290 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002291 break;
2292 }
2293
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002294 up_write(&md->io_lock);
2295
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002296 if (dm_request_based(md))
2297 generic_make_request(c);
2298 else {
Jens Axboe1f98a132009-09-11 14:32:04 +02002299 if (bio_rw_flagged(c, BIO_RW_BARRIER))
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002300 process_barrier(md, c);
2301 else
2302 __split_and_process_bio(md, c);
2303 }
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002304
2305 down_write(&md->io_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002306 }
Milan Broz73d410c2008-02-08 02:10:25 +00002307
Mikulas Patockaef208582009-04-02 19:55:38 +01002308 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309}
2310
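/* Allow I/O submission again and kick the worker to process the deferred queue. */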
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002311static void dm_queue_flush(struct mapped_device *md)
Milan Broz304f3f62008-02-08 02:11:17 +00002312{
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002313 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2314 smp_mb__after_clear_bit();
Mikulas Patocka53d59142009-04-02 19:55:37 +01002315 queue_work(md->wq, &md->work);
Milan Broz304f3f62008-02-08 02:11:17 +00002316}
2317
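/* Record which of the target's flush requests this clone represents. */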
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002318static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
2319{
2320 struct dm_rq_target_io *tio = clone->end_io_data;
2321
2322 tio->info.flush_request = flush_nr;
2323}
2324
2325/* Issue barrier requests to targets and wait for their completion. */
2326static int dm_rq_barrier(struct mapped_device *md)
2327{
2328 int i, j;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002329 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002330 unsigned num_targets = dm_table_get_num_targets(map);
2331 struct dm_target *ti;
2332 struct request *clone;
2333
2334 md->barrier_error = 0;
2335
2336 for (i = 0; i < num_targets; i++) {
2337 ti = dm_table_get_target(map, i);
2338 for (j = 0; j < ti->num_flush_requests; j++) {
2339 clone = clone_rq(md->flush_request, md, GFP_NOIO);
2340 dm_rq_set_flush_nr(clone, j);
2341 atomic_inc(&md->pending[rq_data_dir(clone)]);
2342 map_request(ti, clone, md);
2343 }
2344 }
2345
2346 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2347 dm_table_put(map);
2348
2349 return md->barrier_error;
2350}
2351
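/*
 * Work item that issues the barrier for request-based dm outside the
 * queue lock, then completes or requeues the original flush request.
 */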
2352static void dm_rq_barrier_work(struct work_struct *work)
2353{
2354 int error;
2355 struct mapped_device *md = container_of(work, struct mapped_device,
2356 barrier_work);
2357 struct request_queue *q = md->queue;
2358 struct request *rq;
2359 unsigned long flags;
2360
2361 /*
2362	 * Hold the md reference here and release it only at the end, so that
2363	 * the md can't be deleted by the device opener when the barrier request
2364	 * completes.
2365 */
2366 dm_get(md);
2367
2368 error = dm_rq_barrier(md);
2369
2370 rq = md->flush_request;
2371 md->flush_request = NULL;
2372
2373 if (error == DM_ENDIO_REQUEUE) {
2374 spin_lock_irqsave(q->queue_lock, flags);
2375 blk_requeue_request(q, rq);
2376 spin_unlock_irqrestore(q->queue_lock, flags);
2377 } else
2378 blk_end_request_all(rq, error);
2379
2380 blk_run_queue(q);
2381
2382 dm_put(md);
2383}
2384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385/*
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002386 * Swap in a new table, returning the old one for the caller to destroy.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 */
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002388struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002390 struct dm_table *map = ERR_PTR(-EINVAL);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002391 struct queue_limits limits;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002392 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
Daniel Walkere61290a2008-02-08 02:10:08 +00002394 mutex_lock(&md->suspend_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395
2396 /* device must be suspended */
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002397 if (!dm_suspended_md(md))
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002398 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002400 r = dm_calculate_queue_limits(table, &limits);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002401 if (r) {
2402 map = ERR_PTR(r);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002403 goto out;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002404 }
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002405
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002406 /* cannot change the device type, once a table is bound */
2407 if (md->map &&
2408 (dm_table_get_type(md->map) != dm_table_get_type(table))) {
2409 DMWARN("can't change the device type after a table is bound");
2410 goto out;
2411 }
2412
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002413 map = __bind(md, table, &limits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002415out:
Daniel Walkere61290a2008-02-08 02:10:08 +00002416 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002417 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418}
2419
2420/*
2421 * Functions to lock and unlock any filesystem running on the
2422 * device.
2423 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002424static int lock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425{
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002426 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
2428 WARN_ON(md->frozen_sb);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002429
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002430 md->frozen_sb = freeze_bdev(md->bdev);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002431 if (IS_ERR(md->frozen_sb)) {
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002432 r = PTR_ERR(md->frozen_sb);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002433 md->frozen_sb = NULL;
2434 return r;
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002435 }
2436
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002437 set_bit(DMF_FROZEN, &md->flags);
2438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 return 0;
2440}
2441
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002442static void unlock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443{
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002444 if (!test_bit(DMF_FROZEN, &md->flags))
2445 return;
2446
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002447 thaw_bdev(md->bdev, md->frozen_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 md->frozen_sb = NULL;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002449 clear_bit(DMF_FROZEN, &md->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450}
2451
2452/*
2453 * We need to be able to change a mapping table under a mounted
2454 * filesystem. For example we might want to move some data in
2455 * the background. Before the table can be swapped with
2456	 * dm_bind_table, dm_suspend must be called to flush any in-flight
2457	 * bios and ensure that any further I/O gets deferred.
2458 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002459/*
2460 * Suspend mechanism in request-based dm.
2461 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002462 * 1. Flush all I/Os by lock_fs() if needed.
2463 * 2. Stop dispatching any I/O by stopping the request_queue.
2464 * 3. Wait for all in-flight I/Os to be completed or requeued.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002465 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002466 * To abort suspend, start the request_queue.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002467 */
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002468int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469{
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002470 struct dm_table *map = NULL;
Milan Broz46125c12008-02-08 02:10:30 +00002471 int r = 0;
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002472 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002473 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Daniel Walkere61290a2008-02-08 02:10:08 +00002475 mutex_lock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002476
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002477 if (dm_suspended_md(md)) {
Milan Broz73d410c2008-02-08 02:10:25 +00002478 r = -EINVAL;
Alasdair G Kergond2874832006-11-08 17:44:43 -08002479 goto out_unlock;
Milan Broz73d410c2008-02-08 02:10:25 +00002480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002482 map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002484 /*
2485 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2486 * This flag is cleared before dm_suspend returns.
2487 */
2488 if (noflush)
2489 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2490
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002491 /* This does not get reverted if there's an error later. */
2492 dm_table_presuspend_targets(map);
2493
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002494 /*
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002495 * Flush I/O to the device.
2496 * Any I/O submitted after lock_fs() may not be flushed.
2497 * noflush takes precedence over do_lockfs.
2498 * (lock_fs() flushes I/Os and waits for them to complete.)
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002499 */
2500 if (!noflush && do_lockfs) {
2501 r = lock_fs(md);
2502 if (r)
Kiyoshi Uedaf431d962008-10-21 17:45:07 +01002503 goto out;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002504 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002507 * Here we must make sure that no processes are submitting requests
2508 * to target drivers i.e. no one may be executing
2509 * __split_and_process_bio. This is called from dm_request and
2510 * dm_wq_work.
2511 *
2512 * To get all processes out of __split_and_process_bio in dm_request,
2513 * we take the write lock. To prevent any process from reentering
2514 * __split_and_process_bio from dm_request, we set
2515 * DMF_QUEUE_IO_TO_THREAD.
2516 *
2517 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2518 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2519 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2520 * further calls to __split_and_process_bio from dm_wq_work.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002522 down_write(&md->io_lock);
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002523 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2524 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002525 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002527 /*
2528	 * Request-based dm uses md->wq for barriers (dm_rq_barrier_work), which
2529	 * can still be kicked as long as md->queue is running.  So stop md->queue
2530	 * before flushing md->wq.
2531 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002532 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002533 stop_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002534
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002535 flush_workqueue(md->wq);
2536
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002538 * At this point no more requests are entering target request routines.
2539 * We call dm_wait_for_completion to wait for all existing requests
2540 * to finish.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 */
Mikulas Patocka401600d2009-04-02 19:55:38 +01002542 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002544 down_write(&md->io_lock);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002545 if (noflush)
Mikulas Patocka022c2612009-04-02 19:55:39 +01002546 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
Milan Broz94d63512008-02-08 02:10:27 +00002547 up_write(&md->io_lock);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 /* were we interrupted ? */
Milan Broz46125c12008-02-08 02:10:30 +00002550 if (r < 0) {
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002551 dm_queue_flush(md);
Milan Broz73d410c2008-02-08 02:10:25 +00002552
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002553 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002554 start_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002555
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002556 unlock_fs(md);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002557 goto out; /* pushback list is already flushed, so skip flush */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002558 }
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002559
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002560 /*
2561 * If dm_wait_for_completion returned 0, the device is completely
2562 * quiescent now. There is no request-processing activity. All new
2563 * requests are being added to md->deferred list.
2564 */
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 set_bit(DMF_SUSPENDED, &md->flags);
2567
Kiyoshi Ueda4d4471c2009-12-10 23:52:26 +00002568 dm_table_postsuspend_targets(map);
2569
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002570out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 dm_table_put(map);
Alasdair G Kergond2874832006-11-08 17:44:43 -08002572
2573out_unlock:
Daniel Walkere61290a2008-02-08 02:10:08 +00002574 mutex_unlock(&md->suspend_lock);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002575 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576}
2577
2578int dm_resume(struct mapped_device *md)
2579{
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002580 int r = -EINVAL;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002581 struct dm_table *map = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582
Daniel Walkere61290a2008-02-08 02:10:08 +00002583 mutex_lock(&md->suspend_lock);
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002584 if (!dm_suspended_md(md))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002585 goto out;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002586
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002587 map = dm_get_live_table(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002588 if (!map || !dm_table_get_size(map))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002589 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
Milan Broz8757b772006-10-03 01:15:36 -07002591 r = dm_table_resume_targets(map);
2592 if (r)
2593 goto out;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002594
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002595 dm_queue_flush(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002596
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002597 /*
2598 * Flushing deferred I/Os must be done after targets are resumed
2599 * so that mapping of targets can work correctly.
2600	 * Request-based dm queues the deferred I/Os in its request_queue.
2601 */
2602 if (dm_request_based(md))
2603 start_queue(md->queue);
2604
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002605 unlock_fs(md);
2606
2607 clear_bit(DMF_SUSPENDED, &md->flags);
2608
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 dm_table_unplug_all(map);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002610 r = 0;
2611out:
2612 dm_table_put(map);
Daniel Walkere61290a2008-02-08 02:10:08 +00002613 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002614
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002615 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616}
2617
2618/*-----------------------------------------------------------------
2619 * Event notification.
2620 *---------------------------------------------------------------*/
Milan Broz60935eb2009-06-22 10:12:30 +01002621void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2622 unsigned cookie)
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002623{
Milan Broz60935eb2009-06-22 10:12:30 +01002624 char udev_cookie[DM_COOKIE_LENGTH];
2625 char *envp[] = { udev_cookie, NULL };
2626
2627 if (!cookie)
2628 kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2629 else {
2630 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2631 DM_COOKIE_ENV_VAR_NAME, cookie);
2632 kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
2633 }
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002634}
2635
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002636uint32_t dm_next_uevent_seq(struct mapped_device *md)
2637{
2638 return atomic_add_return(1, &md->uevent_seq);
2639}
2640
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641uint32_t dm_get_event_nr(struct mapped_device *md)
2642{
2643 return atomic_read(&md->event_nr);
2644}
2645
2646int dm_wait_event(struct mapped_device *md, int event_nr)
2647{
2648 return wait_event_interruptible(md->eventq,
2649 (event_nr != atomic_read(&md->event_nr)));
2650}
2651
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002652void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2653{
2654 unsigned long flags;
2655
2656 spin_lock_irqsave(&md->uevent_lock, flags);
2657 list_add(elist, &md->uevent_list);
2658 spin_unlock_irqrestore(&md->uevent_lock, flags);
2659}
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661/*
2662 * The gendisk is only valid as long as you have a reference
2663 * count on 'md'.
2664 */
2665struct gendisk *dm_disk(struct mapped_device *md)
2666{
2667 return md->disk;
2668}
2669
Milan Broz784aae72009-01-06 03:05:12 +00002670struct kobject *dm_kobject(struct mapped_device *md)
2671{
2672 return &md->kobj;
2673}
2674
2675/*
2676 * struct mapped_device should not be exported outside of dm.c
2677 * so use this check to verify that kobj is part of md structure
2678 */
2679struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2680{
2681 struct mapped_device *md;
2682
2683 md = container_of(kobj, struct mapped_device, kobj);
2684 if (&md->kobj != kobj)
2685 return NULL;
2686
Milan Broz4d89b7b2009-06-22 10:12:11 +01002687 if (test_bit(DMF_FREEING, &md->flags) ||
Mike Anderson432a2122009-12-10 23:52:20 +00002688 dm_deleting_md(md))
Milan Broz4d89b7b2009-06-22 10:12:11 +01002689 return NULL;
2690
Milan Broz784aae72009-01-06 03:05:12 +00002691 dm_get(md);
2692 return md;
2693}
2694
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002695int dm_suspended_md(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696{
2697 return test_bit(DMF_SUSPENDED, &md->flags);
2698}
2699
Kiyoshi Ueda64dbce52009-12-10 23:52:27 +00002700int dm_suspended(struct dm_target *ti)
2701{
2702 struct mapped_device *md = dm_table_get_md(ti->table);
2703 int r = dm_suspended_md(md);
2704
2705 dm_put(md);
2706
2707 return r;
2708}
2709EXPORT_SYMBOL_GPL(dm_suspended);
2710
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002711int dm_noflush_suspending(struct dm_target *ti)
2712{
2713 struct mapped_device *md = dm_table_get_md(ti->table);
2714 int r = __noflush_suspending(md);
2715
2716 dm_put(md);
2717
2718 return r;
2719}
2720EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2721
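/*
 * Allocate the io/tio mempools and bioset appropriate for the device
 * type (bio-based or request-based).
 */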
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002722struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2723{
2724 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2725
2726 if (!pools)
2727 return NULL;
2728
2729 pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2730 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2731 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2732 if (!pools->io_pool)
2733 goto free_pools_and_out;
2734
2735 pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2736 mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2737 mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2738 if (!pools->tio_pool)
2739 goto free_io_pool_and_out;
2740
2741 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2742 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2743 if (!pools->bs)
2744 goto free_tio_pool_and_out;
2745
2746 return pools;
2747
2748free_tio_pool_and_out:
2749 mempool_destroy(pools->tio_pool);
2750
2751free_io_pool_and_out:
2752 mempool_destroy(pools->io_pool);
2753
2754free_pools_and_out:
2755 kfree(pools);
2756
2757 return NULL;
2758}
2759
2760void dm_free_md_mempools(struct dm_md_mempools *pools)
2761{
2762 if (!pools)
2763 return;
2764
2765 if (pools->io_pool)
2766 mempool_destroy(pools->io_pool);
2767
2768 if (pools->tio_pool)
2769 mempool_destroy(pools->tio_pool);
2770
2771 if (pools->bs)
2772 bioset_free(pools->bs);
2773
2774 kfree(pools);
2775}
2776
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07002777static const struct block_device_operations dm_blk_dops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 .open = dm_blk_open,
2779 .release = dm_blk_close,
Milan Brozaa129a22006-10-03 01:15:15 -07002780 .ioctl = dm_blk_ioctl,
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002781 .getgeo = dm_blk_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 .owner = THIS_MODULE
2783};
2784
2785EXPORT_SYMBOL(dm_get_mapinfo);
2786
2787/*
2788 * module hooks
2789 */
2790module_init(dm_init);
2791module_exit(dm_exit);
2792
2793module_param(major, uint, 0);
2794MODULE_PARM_DESC(major, "The major number of the device mapper");
2795MODULE_DESCRIPTION(DM_NAME " driver");
2796MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2797MODULE_LICENSE("GPL");