/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
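/*
 * DM_PG_INIT_DELAY_DEFAULT is a "not configured" sentinel: while
 * pg_init_delay_msecs still holds it, __pg_init_all_paths() below falls
 * back to the DM_PG_INIT_DELAY_MSECS default when delaying a pg_init retry.
 */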

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	unsigned queue_size;
	struct work_struct process_queued_ios;
	struct list_head queued_ios;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;
	unsigned min_ios = dm_get_reserved_rq_based_ios();

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}

static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  union map_info *map_context, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = map_context->ptr;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);

		r = map_io(m, clone, info, 1);
		if (r < 0) {
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			clear_mapinfo(m, info);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
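/*
 * For illustration (the device numbers and selector values below are
 * hypothetical): a table line with no feature args, no hardware handler,
 * and a single priority group using the round-robin selector across two
 * paths could look like
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. 0 feature args, 0 hw_handler args, 1 priority group, initial group 1,
 * selector "round-robin" with 0 selector args, then 2 paths with 1 per-path
 * selector arg (for round-robin, a repeat count) each.
 */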
Mike Snitzer498f0102011-08-02 12:32:04 +0100530static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 struct dm_target *ti)
532{
533 int r;
534 struct path_selector_type *pst;
535 unsigned ps_argc;
536
Mike Snitzer498f0102011-08-02 12:32:04 +0100537 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700538 {0, 1024, "invalid number of path selector args"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 };
540
Mike Snitzer498f0102011-08-02 12:32:04 +0100541 pst = dm_get_path_selector(dm_shift_arg(as));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 if (!pst) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700543 ti->error = "unknown path selector type";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 return -EINVAL;
545 }
546
Mike Snitzer498f0102011-08-02 12:32:04 +0100547 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
Mikulas Patocka371b2e32008-07-21 12:00:24 +0100548 if (r) {
549 dm_put_path_selector(pst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 return -EINVAL;
Mikulas Patocka371b2e32008-07-21 12:00:24 +0100551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552
553 r = pst->create(&pg->ps, ps_argc, as->argv);
554 if (r) {
555 dm_put_path_selector(pst);
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700556 ti->error = "path selector constructor failed";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 return r;
558 }
559
560 pg->ps.type = pst;
Mike Snitzer498f0102011-08-02 12:32:04 +0100561 dm_consume_args(as, ps_argc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
563 return 0;
564}
565
Mike Snitzer498f0102011-08-02 12:32:04 +0100566static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567 struct dm_target *ti)
568{
569 int r;
570 struct pgpath *p;
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700571 struct multipath *m = ti->private;
Mike Snitzera58a9352012-07-27 15:08:04 +0100572 struct request_queue *q = NULL;
573 const char *attached_handler_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700574
575 /* we need at least a path arg */
576 if (as->argc < 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700577 ti->error = "no device given";
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100578 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 }
580
581 p = alloc_pgpath();
582 if (!p)
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100583 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584
Mike Snitzer498f0102011-08-02 12:32:04 +0100585 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
Nikanth Karthikesan8215d6e2010-03-06 02:32:27 +0000586 &p->path.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 if (r) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700588 ti->error = "error getting device";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 goto bad;
590 }
591
Mike Snitzera58a9352012-07-27 15:08:04 +0100592 if (m->retain_attached_hw_handler || m->hw_handler_name)
593 q = bdev_get_queue(p->path.dev->bdev);
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100594
Mike Snitzera58a9352012-07-27 15:08:04 +0100595 if (m->retain_attached_hw_handler) {
596 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
597 if (attached_handler_name) {
598 /*
599 * Reset hw_handler_name to match the attached handler
600 * and clear any hw_handler_params associated with the
601 * ignored handler.
602 *
603 * NB. This modifies the table line to show the actual
604 * handler instead of the original table passed in.
605 */
606 kfree(m->hw_handler_name);
607 m->hw_handler_name = attached_handler_name;
608
609 kfree(m->hw_handler_params);
610 m->hw_handler_params = NULL;
611 }
612 }
613
614 if (m->hw_handler_name) {
615 /*
616 * Increments scsi_dh reference, even when using an
617 * already-attached handler.
618 */
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100619 r = scsi_dh_attach(q, m->hw_handler_name);
620 if (r == -EBUSY) {
621 /*
Mike Snitzera58a9352012-07-27 15:08:04 +0100622 * Already attached to different hw_handler:
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100623 * try to reattach with correct one.
624 */
625 scsi_dh_detach(q);
626 r = scsi_dh_attach(q, m->hw_handler_name);
627 }
628
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700629 if (r < 0) {
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100630 ti->error = "error attaching hardware handler";
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700631 dm_put_device(ti, p->path.dev);
632 goto bad;
633 }
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700634
635 if (m->hw_handler_params) {
636 r = scsi_dh_set_params(q, m->hw_handler_params);
637 if (r < 0) {
638 ti->error = "unable to set hardware "
639 "handler parameters";
640 scsi_dh_detach(q);
641 dm_put_device(ti, p->path.dev);
642 goto bad;
643 }
644 }
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700645 }
646
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
648 if (r) {
649 dm_put_device(ti, p->path.dev);
650 goto bad;
651 }
652
653 return p;
654
655 bad:
656 free_pgpath(p);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100657 return ERR_PTR(r);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658}
659
Mike Snitzer498f0102011-08-02 12:32:04 +0100660static struct priority_group *parse_priority_group(struct dm_arg_set *as,
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700661 struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662{
Mike Snitzer498f0102011-08-02 12:32:04 +0100663 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700664 {1, 1024, "invalid number of paths"},
665 {0, 1024, "invalid number of selector args"}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 };
667
668 int r;
Mike Snitzer498f0102011-08-02 12:32:04 +0100669 unsigned i, nr_selector_args, nr_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 struct priority_group *pg;
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700671 struct dm_target *ti = m->ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672
673 if (as->argc < 2) {
674 as->argc = 0;
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100675 ti->error = "not enough priority group arguments";
676 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 }
678
679 pg = alloc_priority_group();
680 if (!pg) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700681 ti->error = "couldn't allocate priority group";
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100682 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683 }
684 pg->m = m;
685
686 r = parse_path_selector(as, pg, ti);
687 if (r)
688 goto bad;
689
690 /*
691 * read the paths
692 */
Mike Snitzer498f0102011-08-02 12:32:04 +0100693 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 if (r)
695 goto bad;
696
Mike Snitzer498f0102011-08-02 12:32:04 +0100697 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 if (r)
699 goto bad;
700
Mike Snitzer498f0102011-08-02 12:32:04 +0100701 nr_args = 1 + nr_selector_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 for (i = 0; i < pg->nr_pgpaths; i++) {
703 struct pgpath *pgpath;
Mike Snitzer498f0102011-08-02 12:32:04 +0100704 struct dm_arg_set path_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705
Mike Snitzer498f0102011-08-02 12:32:04 +0100706 if (as->argc < nr_args) {
Mikulas Patocka148acff2008-07-21 12:00:30 +0100707 ti->error = "not enough path parameters";
Alasdair G Kergon6bbf79a2010-08-12 04:13:49 +0100708 r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 goto bad;
Mikulas Patocka148acff2008-07-21 12:00:30 +0100710 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711
Mike Snitzer498f0102011-08-02 12:32:04 +0100712 path_args.argc = nr_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 path_args.argv = as->argv;
714
715 pgpath = parse_path(&path_args, &pg->ps, ti);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100716 if (IS_ERR(pgpath)) {
717 r = PTR_ERR(pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 goto bad;
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720
721 pgpath->pg = pg;
722 list_add_tail(&pgpath->list, &pg->pgpaths);
Mike Snitzer498f0102011-08-02 12:32:04 +0100723 dm_consume_args(as, nr_args);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 }
725
726 return pg;
727
728 bad:
729 free_priority_group(pg, ti);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100730 return ERR_PTR(r);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731}
732
Mike Snitzer498f0102011-08-02 12:32:04 +0100733static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 unsigned hw_argc;
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700736 int ret;
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700737 struct dm_target *ti = m->ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738
Mike Snitzer498f0102011-08-02 12:32:04 +0100739 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700740 {0, 1024, "invalid number of hardware handler args"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 };
742
Mike Snitzer498f0102011-08-02 12:32:04 +0100743 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 return -EINVAL;
745
746 if (!hw_argc)
747 return 0;
748
Mike Snitzer498f0102011-08-02 12:32:04 +0100749 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
Mike Snitzer510193a2012-05-12 01:43:21 +0100750 if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
751 "scsi_dh_%s", m->hw_handler_name)) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700752 ti->error = "unknown hardware handler type";
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700753 ret = -EINVAL;
754 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 }
Chandra Seetharaman14e98c52008-11-13 23:39:06 +0000756
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700757 if (hw_argc > 1) {
758 char *p;
759 int i, j, len = 4;
760
761 for (i = 0; i <= hw_argc - 2; i++)
762 len += strlen(as->argv[i]) + 1;
763 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
764 if (!p) {
765 ti->error = "memory allocation failed";
766 ret = -ENOMEM;
767 goto fail;
768 }
769 j = sprintf(p, "%d", hw_argc - 1);
770 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
771 j = sprintf(p, "%s", as->argv[i]);
772 }
Mike Snitzer498f0102011-08-02 12:32:04 +0100773 dm_consume_args(as, hw_argc - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
775 return 0;
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700776fail:
777 kfree(m->hw_handler_name);
778 m->hw_handler_name = NULL;
779 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780}
781
Mike Snitzer498f0102011-08-02 12:32:04 +0100782static int parse_features(struct dm_arg_set *as, struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783{
784 int r;
785 unsigned argc;
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700786 struct dm_target *ti = m->ti;
Mike Snitzer498f0102011-08-02 12:32:04 +0100787 const char *arg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788
Mike Snitzer498f0102011-08-02 12:32:04 +0100789 static struct dm_arg _args[] = {
Mike Snitzera58a9352012-07-27 15:08:04 +0100790 {0, 6, "invalid number of feature args"},
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100791 {1, 50, "pg_init_retries must be between 1 and 50"},
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000792 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 };
794
Mike Snitzer498f0102011-08-02 12:32:04 +0100795 r = dm_read_arg_group(_args, as, &argc, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 if (r)
797 return -EINVAL;
798
799 if (!argc)
800 return 0;
801
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100802 do {
Mike Snitzer498f0102011-08-02 12:32:04 +0100803 arg_name = dm_shift_arg(as);
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100804 argc--;
805
Mike Snitzer498f0102011-08-02 12:32:04 +0100806 if (!strcasecmp(arg_name, "queue_if_no_path")) {
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100807 r = queue_if_no_path(m, 1, 0);
808 continue;
809 }
810
Mike Snitzera58a9352012-07-27 15:08:04 +0100811 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
812 m->retain_attached_hw_handler = 1;
813 continue;
814 }
815
Mike Snitzer498f0102011-08-02 12:32:04 +0100816 if (!strcasecmp(arg_name, "pg_init_retries") &&
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100817 (argc >= 1)) {
Mike Snitzer498f0102011-08-02 12:32:04 +0100818 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100819 argc--;
820 continue;
821 }
822
Mike Snitzer498f0102011-08-02 12:32:04 +0100823 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000824 (argc >= 1)) {
Mike Snitzer498f0102011-08-02 12:32:04 +0100825 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000826 argc--;
827 continue;
828 }
829
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 ti->error = "Unrecognised multipath feature request";
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100831 r = -EINVAL;
832 } while (argc && !r);
833
834 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835}
836
837static int multipath_ctr(struct dm_target *ti, unsigned int argc,
838 char **argv)
839{
Mike Snitzer498f0102011-08-02 12:32:04 +0100840 /* target arguments */
841 static struct dm_arg _args[] = {
Mike Snitzera490a072011-03-24 13:54:33 +0000842 {0, 1024, "invalid number of priority groups"},
843 {0, 1024, "invalid initial priority group number"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 };
845
846 int r;
847 struct multipath *m;
Mike Snitzer498f0102011-08-02 12:32:04 +0100848 struct dm_arg_set as;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 unsigned pg_count = 0;
850 unsigned next_pg_num;
851
852 as.argc = argc;
853 as.argv = argv;
854
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700855 m = alloc_multipath(ti);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 if (!m) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700857 ti->error = "can't allocate multipath";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 return -EINVAL;
859 }
860
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700861 r = parse_features(&as, m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 if (r)
863 goto bad;
864
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700865 r = parse_hw_handler(&as, m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866 if (r)
867 goto bad;
868
Mike Snitzer498f0102011-08-02 12:32:04 +0100869 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870 if (r)
871 goto bad;
872
Mike Snitzer498f0102011-08-02 12:32:04 +0100873 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 if (r)
875 goto bad;
876
Mike Snitzera490a072011-03-24 13:54:33 +0000877 if ((!m->nr_priority_groups && next_pg_num) ||
878 (m->nr_priority_groups && !next_pg_num)) {
879 ti->error = "invalid initial priority group";
880 r = -EINVAL;
881 goto bad;
882 }
883
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884 /* parse the priority groups */
885 while (as.argc) {
886 struct priority_group *pg;
887
Micha³ Miros³aw28f16c22006-10-03 01:15:33 -0700888 pg = parse_priority_group(&as, m);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100889 if (IS_ERR(pg)) {
890 r = PTR_ERR(pg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 goto bad;
892 }
893
894 m->nr_valid_paths += pg->nr_pgpaths;
895 list_add_tail(&pg->list, &m->priority_groups);
896 pg_count++;
897 pg->pg_num = pg_count;
898 if (!--next_pg_num)
899 m->next_pg = pg;
900 }
901
902 if (pg_count != m->nr_priority_groups) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700903 ti->error = "priority group count mismatch";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 r = -EINVAL;
905 goto bad;
906 }
907
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +0000908 ti->num_flush_bios = 1;
909 ti->num_discard_bios = 1;
Mike Snitzer042bcef2013-05-10 14:37:16 +0100910 ti->num_write_same_bios = 1;
Mikulas Patocka86279212009-06-22 10:12:24 +0100911
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 return 0;
913
914 bad:
915 free_multipath(m);
916 return r;
917}
918
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000919static void multipath_wait_for_pg_init_completion(struct multipath *m)
920{
921 DECLARE_WAITQUEUE(wait, current);
922 unsigned long flags;
923
924 add_wait_queue(&m->pg_init_wait, &wait);
925
926 while (1) {
927 set_current_state(TASK_UNINTERRUPTIBLE);
928
929 spin_lock_irqsave(&m->lock, flags);
930 if (!m->pg_init_in_progress) {
931 spin_unlock_irqrestore(&m->lock, flags);
932 break;
933 }
934 spin_unlock_irqrestore(&m->lock, flags);
935
936 io_schedule();
937 }
938 set_current_state(TASK_RUNNING);
939
940 remove_wait_queue(&m->pg_init_wait, &wait);
941}
942
943static void flush_multipath_work(struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944{
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -0700945 flush_workqueue(kmpath_handlerd);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000946 multipath_wait_for_pg_init_completion(m);
Alasdair G Kergona044d012005-07-12 15:53:02 -0700947 flush_workqueue(kmultipathd);
Tejun Heo43829732012-08-20 14:51:24 -0700948 flush_work(&m->trigger_event);
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +0000949}
950
951static void multipath_dtr(struct dm_target *ti)
952{
953 struct multipath *m = ti->private;
954
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000955 flush_multipath_work(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 free_multipath(m);
957}
958
959/*
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100960 * Map cloned requests
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100962static int multipath_map(struct dm_target *ti, struct request *clone,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 union map_info *map_context)
964{
965 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 struct multipath *m = (struct multipath *) ti->private;
967
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100968 if (set_mapinfo(m, map_context) < 0)
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100969 /* ENOMEM, requeue */
970 return DM_MAPIO_REQUEUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100972 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100973 r = map_io(m, clone, map_context, 0);
Kiyoshi Ueda45e15722006-12-08 02:41:10 -0800974 if (r < 0 || r == DM_MAPIO_REQUEUE)
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100975 clear_mapinfo(m, map_context);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976
977 return r;
978}
979
980/*
981 * Take a path out of use.
982 */
983static int fail_path(struct pgpath *pgpath)
984{
985 unsigned long flags;
986 struct multipath *m = pgpath->pg->m;
987
988 spin_lock_irqsave(&m->lock, flags);
989
Kiyoshi Ueda66800732008-10-10 13:36:58 +0100990 if (!pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 goto out;
992
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700993 DMWARN("Failing path %s.", pgpath->path.dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994
995 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
Kiyoshi Ueda66800732008-10-10 13:36:58 +0100996 pgpath->is_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 pgpath->fail_count++;
998
999 m->nr_valid_paths--;
1000
1001 if (pgpath == m->current_pgpath)
1002 m->current_pgpath = NULL;
1003
Mike Andersonb15546f2007-10-19 22:48:02 +01001004 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1005 pgpath->path.dev->name, m->nr_valid_paths);
1006
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001007 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008
1009out:
1010 spin_unlock_irqrestore(&m->lock, flags);
1011
1012 return 0;
1013}
1014
1015/*
1016 * Reinstate a previously-failed path
1017 */
1018static int reinstate_path(struct pgpath *pgpath)
1019{
1020 int r = 0;
1021 unsigned long flags;
1022 struct multipath *m = pgpath->pg->m;
1023
1024 spin_lock_irqsave(&m->lock, flags);
1025
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001026 if (pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 goto out;
1028
Alasdair G Kergondef052d2008-07-21 12:00:31 +01001029 if (!pgpath->pg->ps.type->reinstate_path) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 DMWARN("Reinstate path not supported by path selector %s",
1031 pgpath->pg->ps.type->name);
1032 r = -EINVAL;
1033 goto out;
1034 }
1035
1036 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1037 if (r)
1038 goto out;
1039
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001040 pgpath->is_active = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001042 if (!m->nr_valid_paths++ && m->queue_size) {
1043 m->current_pgpath = NULL;
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001044 queue_work(kmultipathd, &m->process_queued_ios);
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001045 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001046 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001047 m->pg_init_in_progress++;
1048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049
Mike Andersonb15546f2007-10-19 22:48:02 +01001050 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1051 pgpath->path.dev->name, m->nr_valid_paths);
1052
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001053 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054
1055out:
1056 spin_unlock_irqrestore(&m->lock, flags);
1057
1058 return r;
1059}
1060
1061/*
1062 * Fail or reinstate all paths that match the provided struct dm_dev.
1063 */
1064static int action_dev(struct multipath *m, struct dm_dev *dev,
1065 action_fn action)
1066{
Mike Snitzer19040c02011-03-24 13:54:31 +00001067 int r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 struct pgpath *pgpath;
1069 struct priority_group *pg;
1070
1071 list_for_each_entry(pg, &m->priority_groups, list) {
1072 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1073 if (pgpath->path.dev == dev)
1074 r = action(pgpath);
1075 }
1076 }
1077
1078 return r;
1079}
1080
1081/*
1082 * Temporarily try to avoid having to use the specified PG
1083 */
1084static void bypass_pg(struct multipath *m, struct priority_group *pg,
1085 int bypassed)
1086{
1087 unsigned long flags;
1088
1089 spin_lock_irqsave(&m->lock, flags);
1090
1091 pg->bypassed = bypassed;
1092 m->current_pgpath = NULL;
1093 m->current_pg = NULL;
1094
1095 spin_unlock_irqrestore(&m->lock, flags);
1096
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001097 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098}
1099
1100/*
1101 * Switch to using the specified PG from the next I/O that gets mapped
1102 */
1103static int switch_pg_num(struct multipath *m, const char *pgstr)
1104{
1105 struct priority_group *pg;
1106 unsigned pgnum;
1107 unsigned long flags;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001108 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001110 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 (pgnum > m->nr_priority_groups)) {
1112 DMWARN("invalid PG number supplied to switch_pg_num");
1113 return -EINVAL;
1114 }
1115
1116 spin_lock_irqsave(&m->lock, flags);
1117 list_for_each_entry(pg, &m->priority_groups, list) {
1118 pg->bypassed = 0;
1119 if (--pgnum)
1120 continue;
1121
1122 m->current_pgpath = NULL;
1123 m->current_pg = NULL;
1124 m->next_pg = pg;
1125 }
1126 spin_unlock_irqrestore(&m->lock, flags);
1127
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001128 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 return 0;
1130}
1131
1132/*
1133 * Set/clear bypassed status of a PG.
1134 * PGs are numbered upwards from 1 in the order they were declared.
1135 */
1136static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1137{
1138 struct priority_group *pg;
1139 unsigned pgnum;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001140 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001142 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 (pgnum > m->nr_priority_groups)) {
1144 DMWARN("invalid PG number supplied to bypass_pg");
1145 return -EINVAL;
1146 }
1147
1148 list_for_each_entry(pg, &m->priority_groups, list) {
1149 if (!--pgnum)
1150 break;
1151 }
1152
1153 bypass_pg(m, pg, bypassed);
1154 return 0;
1155}
1156
1157/*
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001158 * Should we retry pg_init immediately?
1159 */
1160static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1161{
1162 unsigned long flags;
1163 int limit_reached = 0;
1164
1165 spin_lock_irqsave(&m->lock, flags);
1166
1167 if (m->pg_init_count <= m->pg_init_retries)
1168 m->pg_init_required = 1;
1169 else
1170 limit_reached = 1;
1171
1172 spin_unlock_irqrestore(&m->lock, flags);
1173
1174 return limit_reached;
1175}
1176
Chandra Seetharaman3ae31f62009-10-21 09:22:46 -07001177static void pg_init_done(void *data, int errors)
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001178{
Moger, Babu83c0d5d2010-03-06 02:29:45 +00001179 struct pgpath *pgpath = data;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001180 struct priority_group *pg = pgpath->pg;
1181 struct multipath *m = pg->m;
1182 unsigned long flags;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001183 unsigned delay_retry = 0;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001184
1185 /* device or driver problems */
1186 switch (errors) {
1187 case SCSI_DH_OK:
1188 break;
1189 case SCSI_DH_NOSYS:
1190 if (!m->hw_handler_name) {
1191 errors = 0;
1192 break;
1193 }
Moger, Babuf7b934c2010-03-06 02:29:49 +00001194 DMERR("Could not failover the device: Handler scsi_dh_%s "
1195 "Error %d.", m->hw_handler_name, errors);
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001196 /*
1197 * Fail path for now, so we do not ping pong
1198 */
1199 fail_path(pgpath);
1200 break;
1201 case SCSI_DH_DEV_TEMP_BUSY:
1202 /*
1203 * Probably doing something like FW upgrade on the
1204 * controller so try the other pg.
1205 */
1206 bypass_pg(m, pg, 1);
1207 break;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001208 case SCSI_DH_RETRY:
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001209 /* Wait before retrying. */
1210 delay_retry = 1;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001211 case SCSI_DH_IMM_RETRY:
1212 case SCSI_DH_RES_TEMP_UNAVAIL:
1213 if (pg_init_limit_reached(m, pgpath))
1214 fail_path(pgpath);
1215 errors = 0;
1216 break;
1217 default:
1218 /*
1219 * We probably do not want to fail the path for a device
1220 * error, but this is what the old dm did. In future
1221 * patches we can do more advanced handling.
1222 */
1223 fail_path(pgpath);
1224 }
1225
1226 spin_lock_irqsave(&m->lock, flags);
1227 if (errors) {
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001228 if (pgpath == m->current_pgpath) {
1229 DMERR("Could not failover device. Error %d.", errors);
1230 m->current_pgpath = NULL;
1231 m->current_pg = NULL;
1232 }
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001233 } else if (!m->pg_init_required)
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001234 pg->bypassed = 0;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001235
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001236 if (--m->pg_init_in_progress)
1237 /* Activations of other paths are still on going */
1238 goto out;
1239
1240 if (!m->pg_init_required)
1241 m->queue_io = 0;
1242
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001243 m->pg_init_delay_retry = delay_retry;
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001244 queue_work(kmultipathd, &m->process_queued_ios);
1245
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001246 /*
1247 * Wake up any thread waiting to suspend.
1248 */
1249 wake_up(&m->pg_init_wait);
1250
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001251out:
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001252 spin_unlock_irqrestore(&m->lock, flags);
1253}
1254
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001255static void activate_path(struct work_struct *work)
1256{
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001257 struct pgpath *pgpath =
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001258 container_of(work, struct pgpath, activate_path.work);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001259
Chandra Seetharaman3ae31f62009-10-21 09:22:46 -07001260 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
Moger, Babu83c0d5d2010-03-06 02:29:45 +00001261 pg_init_done, pgpath);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001262}
1263
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001264static int noretry_error(int error)
1265{
1266 switch (error) {
1267 case -EOPNOTSUPP:
1268 case -EREMOTEIO:
1269 case -EILSEQ:
1270 case -ENODATA:
Jun'ichi Nomuracc9d3c32013-09-13 14:54:30 +09001271 case -ENOSPC:
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001272 return 1;
1273 }
1274
1275 /* Anything else could be a path failure, so should be retried */
1276 return 0;
1277}
1278
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279/*
1280 * end_io handling
1281 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001282static int do_end_io(struct multipath *m, struct request *clone,
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001283 int error, struct dm_mpath_io *mpio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284{
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001285 /*
1286 * We don't queue any clone request inside the multipath target
1287 * during end I/O handling, since those clone requests don't have
1288 * bio clones. If we queue them inside the multipath target,
1289 * we need to make bio clones, that requires memory allocation.
1290 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1291 * don't have bio clones.)
1292 * Instead of queueing the clone request here, we queue the original
1293 * request into dm core, which will remake a clone request and
1294 * clone bios for it and resubmit it later.
1295 */
1296 int r = DM_ENDIO_REQUEUE;
Stefan Bader640eb3b2005-11-21 21:32:35 -08001297 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001299 if (!error && !clone->errors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 return 0; /* I/O complete */
1301
Mike Snitzerf84cb8a2013-09-19 12:13:58 -04001302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001311 return error;
Mike Snitzerf84cb8a2013-09-19 12:13:58 -04001312 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001313
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001314 if (mpio->pgpath)
1315 fail_path(mpio->pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
Stefan Bader640eb3b2005-11-21 21:32:35 -08001317 spin_lock_irqsave(&m->lock, flags);
Hannes Reinecke751b2a72011-01-18 10:13:12 +01001318 if (!m->nr_valid_paths) {
1319 if (!m->queue_if_no_path) {
1320 if (!__must_push_back(m))
1321 r = -EIO;
1322 } else {
1323 if (error == -EBADE)
1324 r = error;
1325 }
1326 }
Stefan Bader640eb3b2005-11-21 21:32:35 -08001327 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001329 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = map_context->ptr;
        struct pgpath *pgpath;
        struct path_selector *ps;
        int r;

        BUG_ON(!mpio);

        r = do_end_io(m, clone, error, mpio);
        pgpath = mpio->pgpath;
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
        clear_mapinfo(m, map_context);

        return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;

        queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        mutex_lock(&m->work_mutex);
        flush_multipath_work(m);
        mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        m->queue_if_no_path = m->saved_queue_if_no_path;
        spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
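/*
 * A purely illustrative example of both formats (hypothetical device
 * numbers, assuming the round-robin path selector with a per-path
 * repeat_count of 1000): a healthy map with the queue_if_no_path feature
 * and one priority group of two paths might report roughly
 *
 *   Info:  2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *   Table: 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * as seen via "dmsetup status" and "dmsetup table" respectively.
 */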
static void multipath_status(struct dm_target *ti, status_type_t type,
                             unsigned status_flags, char *result, unsigned maxlen)
{
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
        char state;

        spin_lock_irqsave(&m->lock, flags);

        /* Features */
        if (type == STATUSTYPE_INFO)
                DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
        else {
                DMEMIT("%u ", m->queue_if_no_path +
                              (m->pg_init_retries > 0) * 2 +
                              (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
                              m->retain_attached_hw_handler);
                if (m->queue_if_no_path)
                        DMEMIT("queue_if_no_path ");
                if (m->pg_init_retries)
                        DMEMIT("pg_init_retries %u ", m->pg_init_retries);
                if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
                        DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
                if (m->retain_attached_hw_handler)
                        DMEMIT("retain_attached_hw_handler ");
        }

        if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
                DMEMIT("1 %s ", m->hw_handler_name);

        DMEMIT("%u ", m->nr_priority_groups);

        if (m->next_pg)
                pg_num = m->next_pg->pg_num;
        else if (m->current_pg)
                pg_num = m->current_pg->pg_num;
        else
                pg_num = (m->nr_priority_groups ? 1 : 0);

        DMEMIT("%u ", pg_num);

        switch (type) {
        case STATUSTYPE_INFO:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed)
                                state = 'D';    /* Disabled */
                        else if (pg == m->current_pg)
                                state = 'A';    /* Currently Active */
                        else
                                state = 'E';    /* Enabled */

                        DMEMIT("%c ", state);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->info_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
                                       p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;

        case STATUSTYPE_TABLE:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        DMEMIT("%s ", pg->ps.type->name);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->table_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s ", p->path.dev->name);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;
        }

        spin_unlock_irqrestore(&m->lock, flags);
}

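/*
 * Messages accepted by the target, as parsed below:
 *
 *   queue_if_no_path | fail_if_no_path
 *   disable_group <group> | enable_group <group> | switch_group <group>
 *   reinstate_path <path_dev> | fail_path <path_dev>
 *
 * From userspace they are typically delivered with something like
 * (hypothetical device names):
 *
 *   dmsetup message mpatha 0 "fail_path 8:16"
 */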
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r = -EINVAL;
        struct dm_dev *dev;
        struct multipath *m = (struct multipath *) ti->private;
        action_fn action;

        mutex_lock(&m->work_mutex);

        if (dm_suspended(ti)) {
                r = -EBUSY;
                goto out;
        }

        if (argc == 1) {
                if (!strcasecmp(argv[0], "queue_if_no_path")) {
                        r = queue_if_no_path(m, 1, 0);
                        goto out;
                } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
                        r = queue_if_no_path(m, 0, 0);
                        goto out;
                }
        }

        if (argc != 2) {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        if (!strcasecmp(argv[0], "disable_group")) {
                r = bypass_pg_num(m, argv[1], 1);
                goto out;
        } else if (!strcasecmp(argv[0], "enable_group")) {
                r = bypass_pg_num(m, argv[1], 0);
                goto out;
        } else if (!strcasecmp(argv[0], "switch_group")) {
                r = switch_pg_num(m, argv[1]);
                goto out;
        } else if (!strcasecmp(argv[0], "reinstate_path"))
                action = reinstate_path;
        else if (!strcasecmp(argv[0], "fail_path"))
                action = fail_path;
        else {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
        if (r) {
                DMWARN("message: error getting device %s",
                       argv[1]);
                goto out;
        }

        r = action_dev(m, dev, action);

        dm_put_device(ti, dev);

out:
        mutex_unlock(&m->work_mutex);
        return r;
}
1577
Al Viro647b3d02007-08-28 22:15:59 -04001578static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
Milan Broz9af4aa32006-10-03 01:15:20 -07001579 unsigned long arg)
1580{
Mikulas Patocka35991652012-06-03 00:29:58 +01001581 struct multipath *m = ti->private;
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001582 struct pgpath *pgpath;
Mikulas Patocka35991652012-06-03 00:29:58 +01001583 struct block_device *bdev;
1584 fmode_t mode;
Milan Broz9af4aa32006-10-03 01:15:20 -07001585 unsigned long flags;
Mikulas Patocka35991652012-06-03 00:29:58 +01001586 int r;
1587
Mikulas Patocka35991652012-06-03 00:29:58 +01001588 bdev = NULL;
1589 mode = 0;
1590 r = 0;
Milan Broz9af4aa32006-10-03 01:15:20 -07001591
1592 spin_lock_irqsave(&m->lock, flags);
1593
1594 if (!m->current_pgpath)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +01001595 __choose_pgpath(m, 0);
Milan Broz9af4aa32006-10-03 01:15:20 -07001596
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001597 pgpath = m->current_pgpath;
1598
1599 if (pgpath) {
1600 bdev = pgpath->path.dev->bdev;
1601 mode = pgpath->path.dev->mode;
Milan Broze90dae12006-10-03 01:15:22 -07001602 }
Milan Broz9af4aa32006-10-03 01:15:20 -07001603
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001604 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
Hannes Reinecke6c182cd2013-07-10 23:41:15 +01001605 r = -ENOTCONN;
Milan Broz9af4aa32006-10-03 01:15:20 -07001606 else if (!bdev)
1607 r = -EIO;
1608
1609 spin_unlock_irqrestore(&m->lock, flags);
1610
Paolo Bonziniec8013b2012-01-12 16:01:29 +01001611 /*
1612 * Only pass ioctls through if the device sizes match exactly.
1613 */
1614 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1615 r = scsi_verify_blk_ioctl(NULL, cmd);
1616
Hannes Reinecke6c182cd2013-07-10 23:41:15 +01001617 if (r == -ENOTCONN && !fatal_signal_pending(current))
Mikulas Patocka35991652012-06-03 00:29:58 +01001618 queue_work(kmultipathd, &m->process_queued_ios);
Mikulas Patocka35991652012-06-03 00:29:58 +01001619
Al Viro633a08b2007-08-29 20:34:12 -04001620 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
Milan Broz9af4aa32006-10-03 01:15:20 -07001621}
1622
static int multipath_iterate_devices(struct dm_target *ti,
                                     iterate_devices_callout_fn fn, void *data)
{
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        int ret = 0;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(p, &pg->pgpaths, list) {
                        ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        return dm_underlying_device_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped the I/Os now, they would just wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside the target
 * because no map is available, we don't return "busy"; otherwise dm core
 * won't hand us the I/Os and we can't do what we want with them.
 */
static int multipath_busy(struct dm_target *ti)
{
        int busy = 0, has_active = 0;
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *pgpath;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        /* Guess which priority_group will be used at next mapping time */
        if (unlikely(!m->current_pgpath && m->next_pg))
                pg = m->next_pg;
        else if (likely(m->current_pg))
                pg = m->current_pg;
        else
                /*
                 * We don't know which pg will be used at next mapping time.
                 * We don't call __choose_pgpath() here, to avoid triggering
                 * pg_init just by checking for busyness.
                 * So we don't know whether the underlying devices we would be
                 * using at next mapping time are busy or not. Just try mapping.
                 */
                goto out;

        /*
         * If there is at least one non-busy active path, the path selector
         * will be able to select it. So we consider such a pg as not busy.
         */
        busy = 1;
        list_for_each_entry(pgpath, &pg->pgpaths, list)
                if (pgpath->is_active) {
                        has_active = 1;

                        if (!__pgpath_busy(pgpath)) {
                                busy = 0;
                                break;
                        }
                }

        if (!has_active)
                /*
                 * No active path in this pg, so this pg won't be used and
                 * the current_pg will be changed at next mapping time.
                 * We need to try mapping to determine it.
                 */
                busy = 0;

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
        .name = "multipath",
        .version = {1, 5, 1},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
        .map_rq = multipath_map,
        .rq_end_io = multipath_end_io,
        .presuspend = multipath_presuspend,
        .postsuspend = multipath_postsuspend,
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
        .ioctl = multipath_ioctl,
        .iterate_devices = multipath_iterate_devices,
        .busy = multipath_busy,
};
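
/*
 * For reference, and only as an illustration (hypothetical map name, size
 * and device numbers): a map using this target is created from userspace
 * with a table line in the constructor format documented above, e.g.
 *
 *   echo "0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000" \
 *       | dmsetup create mpath_test
 */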

static int __init dm_multipath_init(void)
{
        int r;

        /* allocate a slab for the dm_ios */
        _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
        if (!_mpio_cache)
                return -ENOMEM;

        r = dm_register_target(&multipath_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_mpio_cache);
                return -EINVAL;
        }

        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        /*
         * A separate workqueue is used to handle the device handlers,
         * to avoid overloading the existing workqueue. Overloading the
         * old workqueue would also create a bottleneck in the path of
         * storage hardware device activation.
         */
        kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
                                                  WQ_MEM_RECLAIM);
        if (!kmpath_handlerd) {
                DMERR("failed to create workqueue kmpath_handlerd");
                destroy_workqueue(kmultipathd);
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);

        return r;
}

static void __exit dm_multipath_exit(void)
{
        destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);

        dm_unregister_target(&multipath_target);
        kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");