/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
7
8#include "dm.h"
Mike Anderson51e5b2b2007-10-19 22:48:00 +01009#include "dm-uevent.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070010
11#include <linux/init.h>
12#include <linux/module.h>
Arjan van de Ven48c9c272006-03-27 01:18:20 -080013#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/moduleparam.h>
15#include <linux/blkpg.h>
16#include <linux/bio.h>
17#include <linux/buffer_head.h>
Arnd Bergmann6e9624b2010-08-07 18:25:34 +020018#include <linux/smp_lock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/mempool.h>
20#include <linux/slab.h>
21#include <linux/idr.h>
Darrick J. Wong3ac51e72006-03-27 01:17:54 -080022#include <linux/hdreg.h>
Li Zefan55782132009-06-09 13:43:05 +080023
24#include <trace/events/block.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

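/*
 * A sketch of how the cookie travels (an assumption drawn from the
 * comment above, not from code in this file): userspace supplies the
 * cookie with the resume/remove/rename ioctl, and dm adds
 * "DM_COOKIE=<value>" to the resulting uevent environment so that tools
 * such as udev can match the event to the operation that caused it.
 */
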
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

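/*
 * dm_get_mapinfo() and dm_get_rq_mapinfo() recover the per-target
 * map_info stashed in bio->bi_private / rq->end_io_data when a clone is
 * set up, so target drivers can retrieve their context from a bare bio
 * or request.
 */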
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

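/* Sum of the reads and writes currently in flight on this device. */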
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

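/*
 * I/O accounting: start_io_acct()/end_io_acct() bracket each bio-based
 * dm_io, updating the gendisk statistics and the md->pending[] counters
 * that md_in_flight() above reads.
 */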
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

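/*
 * Illustrative only (not code from this file): a caller forcing the
 * classic 255-head/63-sector translation would fill in
 *
 *	struct hd_geometry geo = {
 *		.heads = 255,
 *		.sectors = 63,
 *		.cylinders = capacity / (255 * 63),
 *		.start = 0,
 *	};
 *
 * and pass it to dm_set_geometry(), which only checks that start does
 * not lie beyond cylinders * heads * sectors.
 */
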
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn; unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 *   -EOPNOTSUPP supersedes any I/O error.
	 *   Requeue request supersedes any I/O error except -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

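/*
 * dm_done() interprets the target's rq_end_io return value: r <= 0
 * completes the I/O, DM_ENDIO_INCOMPLETE leaves it to the target and
 * DM_ENDIO_REQUEUE pushes the original request back for a retry.
 */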
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.  So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This is just for cleaning up the bookkeeping of the queue in
	 * which the clone was dispatched.
	 * The clone is *NOT* actually freed here, because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

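/*
 * Worked example for the split_io boundary arithmetic below, assuming
 * split_io is a power of two: with split_io = 64 and offset = 100,
 * boundary = ((100 + 64) & ~63) - 100 = 128 - 100 = 28, i.e. the I/O is
 * clipped at the next 64-sector boundary inside the target.
 */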
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

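/*
 * __map_bio() hands a clone to the target's map function and interprets
 * the result: DM_MAPIO_REMAPPED dispatches the clone, DM_MAPIO_REQUEUE
 * or a negative error drops the clone and lets dec_pending() decide,
 * and 0 means the target has taken ownership of the io.
 */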
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~REQ_HARDBARRIER;
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned flush_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
		     flush_nr++)
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;

	return 0;
}

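/*
 * __clone_and_map() advances the clone_info cursor by one step.  Three
 * cases are handled below: the remaining io fits in the current target
 * (one clone), several whole bvecs fit (clone_bio() over a bvec range),
 * or a single bvec straddles targets and is carved up with split_bvec().
 */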
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci, ti);
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!(bio->bi_rw & REQ_HARDBARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

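/*
 * dm_merge_bvec() implements the queue's merge_bvec_fn: given a proposed
 * bio_vec, report how many bytes the mapped device can accept at that
 * offset without forcing a split later on.
 */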
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support a merge method and some of the
	 * devices provided their merge_bvec method (we know this by looking
	 * at queue_max_hw_sectors), then we can't allow bios with multiple
	 * vector entries.  So always set max_size to 0, and the code below
	 * allows just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
}

static bool dm_rq_is_flush_request(struct request *rq)
{
	return rq->cmd_flags & REQ_FLUSH;
}

void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);
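
/*
 * dm_dispatch_request() is exported for request-based targets.  One
 * possible use (a hypothetical sketch, outside this file): a target that
 * returned DM_MAPIO_SUBMITTED from ->map_rq keeps the clone, picks an
 * underlying device later, and then submits the clone itself:
 *
 *	clone->q = bdev_get_queue(dev->bdev);	('dev' is hypothetical)
 *	clone->rq_disk = dev->bdev->bd_disk;
 *	dm_dispatch_request(clone);
 */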

static void dm_rq_bio_destructor(struct bio *bio)
{
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	if (!info)
		return -ENOMEM;

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	if (dm_rq_is_flush_request(rq)) {
		blk_rq_init(NULL, clone);
		clone->cmd_type = REQ_TYPE_FS;
		clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
	} else {
		r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
				      dm_rq_bio_constructor, tio);
		if (r)
			return r;

		clone->cmd = rq->cmd;
		clone->cmd_len = rq->cmd_len;
		clone->sense = rq->sense;
		clone->buffer = rq->buffer;
	}

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}
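
/*
 * Note on the flush path above: the barrier/flush request has no bios to
 * clone, so the clone is initialized from scratch with blk_rq_init()
 * rather than via blk_rq_prep_clone(), which copies the original's bios.
 */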

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(dm_rq_is_flush_request(rq)))
		return BLKPREP_OK;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
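
/*
 * BLKPREP_DEFER above leaves the request on the queue; the block layer
 * will invoke dm_prep_fn() for it again later, so a failed GFP_ATOMIC
 * clone allocation is simply retried.  REQ_DONTPREP marks the request
 * as prepared so a successful clone is not built twice.
 */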

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;

	/*
	 * For suspend, check blk_queue_stopped() and increment ->pending
	 * within a single queue_lock, so that we never increment the number
	 * of in-flight I/Os after the queue has been stopped in dm_suspend().
	 */
	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto plug_and_out;

		if (unlikely(dm_rq_is_flush_request(rq))) {
			BUG_ON(md->flush_request);
			md->flush_request = rq;
			blk_start_request(rq);
			queue_work(md->wq, &md->barrier_work);
			goto out;
		}

		ti = dm_table_find_target(map, blk_rq_pos(rq));
		if (ti->type->busy && ti->type->busy(ti))
			goto plug_and_out;

		blk_start_request(rq);
		clone = rq->special;
		atomic_inc(&md->pending[rq_data_dir(clone)]);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))
			goto requeued;

		spin_lock_irq(q->queue_lock);
	}

	goto out;

requeued:
	spin_lock_irq(q->queue_lock);

plug_and_out:
	if (!elv_queue_empty(q))
		/* Some requests still remain, retry later */
		blk_plug_device(q);

out:
	dm_table_put(map);

	return;
}
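
/*
 * Note that the queue lock is dropped across map_request() so that the
 * target's ->map_rq and the actual dispatch do not run under the lock;
 * it is re-acquired before the loop continues or the function returns.
 */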

int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_table_put(map);

	return r;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (map) {
		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
		if (map) {
			/*
			 * Request-based dm cares only about its own queue
			 * when queried for the congestion status of the
			 * request_queue.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);

			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
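
/*
 * Both allocators above use the two-step IDR idiom of this kernel era:
 * idr_pre_get() preallocates with GFP_KERNEL outside the lock, then
 * idr_get_new()/idr_get_new_above() consumes the preallocation under
 * _minor_lock.  MINOR_ALLOCED is stored as a placeholder pointer and is
 * replaced with the real mapped_device once alloc_dev() has finished
 * constructing it.
 */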

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	spin_lock_init(&md->barrier_error_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_init_queue(dm_request_fn, NULL);
	if (!md->queue)
		goto bad_queue;

	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet,
	 * although we initialized the queue using blk_init_queue().
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
	md->saved_make_request_fn = md->queue->make_request_fn;
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* the md already has the necessary mempools */
		goto out;

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* The mempool bind is complete; the table no longer needs any mempools. */
	dm_table_free_md_mempools(t);
}
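
/*
 * The pools taken over above were allocated at table-load time by
 * dm_alloc_md_mempools() according to the table's type (bio-based or
 * request-based); ownership moves from the table to the mapped_device
 * the first time a table is bound.
 */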

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	unsigned long flags;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * If the old table wasn't request-based, the queue was not stopped
	 * during suspension, so stop it here to prevent request-based I/O
	 * from being mapped before the resume.
	 * This must be done before setting the queue restrictions, because
	 * request-based dm may start running right after they are set.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	write_lock_irqsave(&md->map_lock, flags);
	old_map = md->map;
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock_irqrestore(&md->map_lock, flags);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;
	unsigned long flags;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	md->map = NULL;
	write_unlock_irqrestore(&md->map_lock, flags);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_live_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended_md(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		dm_table_destroy(__unbind(md));
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Mikulas Patocka401600d2009-04-02 19:55:38 +01002206static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
Milan Broz46125c12008-02-08 02:10:30 +00002207{
2208 int r = 0;
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002209 DECLARE_WAITQUEUE(wait, current);
2210
2211 dm_unplug_all(md->queue);
2212
2213 add_wait_queue(&md->wait, &wait);
Milan Broz46125c12008-02-08 02:10:30 +00002214
2215 while (1) {
Mikulas Patocka401600d2009-04-02 19:55:38 +01002216 set_current_state(interruptible);
Milan Broz46125c12008-02-08 02:10:30 +00002217
2218 smp_mb();
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00002219 if (!md_in_flight(md))
Milan Broz46125c12008-02-08 02:10:30 +00002220 break;
2221
Mikulas Patocka401600d2009-04-02 19:55:38 +01002222 if (interruptible == TASK_INTERRUPTIBLE &&
2223 signal_pending(current)) {
Milan Broz46125c12008-02-08 02:10:30 +00002224 r = -EINTR;
2225 break;
2226 }
2227
2228 io_schedule();
2229 }
2230 set_current_state(TASK_RUNNING);
2231
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002232 remove_wait_queue(&md->wait, &wait);
2233
Milan Broz46125c12008-02-08 02:10:30 +00002234 return r;
2235}
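
/*
 * The explicit smp_mb() above is intended to order the task-state write
 * against the read of the in-flight counters, so that a wakeup issued
 * by the completion path after the counters drop to zero is not missed.
 */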

static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	md->barrier_error = 0;

	dm_flush(md);

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);
		dm_flush(md);
	}

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);
	else {
		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
	}
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else {
			if (c->bi_rw & REQ_HARDBARRIER)
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);
		}

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->info.flush_request = flush_nr;
}

/* Issue barrier requests to targets and wait for their completion. */
static int dm_rq_barrier(struct mapped_device *md)
{
	int i, j;
	struct dm_table *map = dm_get_live_table(md);
	unsigned num_targets = dm_table_get_num_targets(map);
	struct dm_target *ti;
	struct request *clone;

	md->barrier_error = 0;

	for (i = 0; i < num_targets; i++) {
		ti = dm_table_get_target(map, i);
		for (j = 0; j < ti->num_flush_requests; j++) {
			clone = clone_rq(md->flush_request, md, GFP_NOIO);
			dm_rq_set_flush_nr(clone, j);
			atomic_inc(&md->pending[rq_data_dir(clone)]);
			map_request(ti, clone, md);
		}
	}

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	dm_table_put(map);

	return md->barrier_error;
}
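
/*
 * Each flush clone is accounted in md->pending before being mapped, so
 * dm_wait_for_completion() above does not return until every target has
 * finished all of its flush requests (one clone per num_flush_requests
 * entry per target).
 */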

static void dm_rq_barrier_work(struct work_struct *work)
{
	int error;
	struct mapped_device *md = container_of(work, struct mapped_device,
						barrier_work);
	struct request_queue *q = md->queue;
	struct request *rq;
	unsigned long flags;

	/*
	 * Hold the md reference here and release it only at the end, so
	 * that the md can't be deleted by a device opener while the
	 * barrier request is completing.
	 */
	dm_get(md);

	error = dm_rq_barrier(md);

	rq = md->flush_request;
	md->flush_request = NULL;

	if (error == DM_ENDIO_REQUEUE) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, rq);
		spin_unlock_irqrestore(q->queue_lock, flags);
	} else
		blk_end_request_all(rq, error);

	blk_run_queue(q);

	dm_put(md);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r) {
		map = ERR_PTR(r);
		goto out;
	}

	/* cannot change the device type, once a table is bound */
	if (md->map &&
	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
		DMWARN("can't change the device type after a table is bound");
		goto out;
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. no one may be executing
	 * __split_and_process_bio, which is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Request-based dm uses md->wq for the barrier work
	 * (dm_rq_barrier_work), which can be kicked until md->queue is
	 * stopped.  So stop md->queue before flushing md->wq.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that the mapping of targets can work correctly.
	 * Request-based dm queues the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c,
 * so use this check to verify that the kobj is part of an md structure.
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
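
/*
 * Sketch of how a target's end_io path might use the helper above (the
 * exact policy is target-specific; this is illustrative only):
 *
 *	if (error && dm_noflush_suspending(ti))
 *		return DM_ENDIO_REQUEUE;	(push back, retried on resume)
 */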

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");