Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
Mikulas Patocka586e80e2008-10-21 17:44:59 +01008#include <linux/device-mapper.h>
9
Mike Snitzerf4790822013-09-12 18:06:12 -040010#include "dm.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include "dm-path-selector.h"
Mike Andersonb15546f2007-10-19 22:48:02 +010012#include "dm-uevent.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070013
14#include <linux/ctype.h>
15#include <linux/init.h>
16#include <linux/mempool.h>
17#include <linux/module.h>
18#include <linux/pagemap.h>
19#include <linux/slab.h>
20#include <linux/time.h>
21#include <linux/workqueue.h>
Mikulas Patocka35991652012-06-03 00:29:58 +010022#include <linux/delay.h>
Chandra Seetharamancfae5c92008-05-01 14:50:11 -070023#include <scsi/scsi_dh.h>
Arun Sharma600634972011-07-26 16:09:06 -070024#include <linux/atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
Alasdair G Kergon72d94862006-06-26 00:27:35 -070026#define DM_MSG_PREFIX "multipath"
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000027#define DM_PG_INIT_DELAY_MSECS 2000
28#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30/* Path properties */
31struct pgpath {
32 struct list_head list;
33
34 struct priority_group *pg; /* Owning PG */
Kiyoshi Ueda66800732008-10-10 13:36:58 +010035 unsigned is_active; /* Path status */
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 unsigned fail_count; /* Cumulative failure count */
37
Josef "Jeff" Sipekc922d5f2006-12-08 02:36:33 -080038 struct dm_path path;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000039 struct delayed_work activate_path;
Linus Torvalds1da177e2005-04-16 15:20:36 -070040};
41
42#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
43
44/*
45 * Paths are grouped into Priority Groups and numbered from 1 upwards.
46 * Each has a path selector which controls which path gets used.
47 */
48struct priority_group {
49 struct list_head list;
50
51 struct multipath *m; /* Owning multipath instance */
52 struct path_selector ps;
53
54 unsigned pg_num; /* Reference number */
55 unsigned bypassed; /* Temporarily bypass this PG? */
56
57 unsigned nr_pgpaths; /* Number of paths in PG */
58 struct list_head pgpaths;
59};
60
61/* Multipath context */
62struct multipath {
63 struct list_head list;
64 struct dm_target *ti;
65
Chandra Seetharamancfae5c92008-05-01 14:50:11 -070066 const char *hw_handler_name;
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -070067 char *hw_handler_params;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000068
Mike Snitzer1fbdd2b2012-06-03 00:29:43 +010069 spinlock_t lock;
70
Linus Torvalds1da177e2005-04-16 15:20:36 -070071 unsigned nr_priority_groups;
72 struct list_head priority_groups;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000073
74 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
75
Linus Torvalds1da177e2005-04-16 15:20:36 -070076 unsigned pg_init_required; /* pg_init needs calling? */
Alasdair G Kergonc3cd4f62005-07-12 15:53:04 -070077 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000078 unsigned pg_init_delay_retry; /* Delay pg_init retry? */
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80 unsigned nr_valid_paths; /* Total number of usable paths */
81 struct pgpath *current_pgpath;
82 struct priority_group *current_pg;
83 struct priority_group *next_pg; /* Switch to this PG if set */
84 unsigned repeat_count; /* I/Os left before calling PS again */
85
Mike Snitzer1fbdd2b2012-06-03 00:29:43 +010086 unsigned queue_io:1; /* Must we queue all I/O? */
87 unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
88 unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
Mike Snitzera58a9352012-07-27 15:08:04 +010089 unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +000090 unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
Mike Snitzer1fbdd2b2012-06-03 00:29:43 +010091
Dave Wysochanskic9e45582007-10-19 22:47:53 +010092 unsigned pg_init_retries; /* Number of times to retry pg_init */
93 unsigned pg_init_count; /* Number of times pg_init called */
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +000094 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
Linus Torvalds1da177e2005-04-16 15:20:36 -070095
Mike Snitzer1fbdd2b2012-06-03 00:29:43 +010096 unsigned queue_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097 struct work_struct process_queued_ios;
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +010098 struct list_head queued_ios;
Linus Torvalds1da177e2005-04-16 15:20:36 -070099
100 struct work_struct trigger_event;
101
102 /*
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100103 * We must use a mempool of dm_mpath_io structs so that we
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 * can resubmit bios on error.
105 */
106 mempool_t *mpio_pool;
Mike Anderson6380f262009-12-10 23:52:21 +0000107
108 struct mutex work_mutex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109};
110
111/*
112 * Context information attached to each bio we process.
113 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100114struct dm_mpath_io {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115 struct pgpath *pgpath;
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100116 size_t nr_bytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117};
118
119typedef int (*action_fn) (struct pgpath *pgpath);
120
Christoph Lametere18b8902006-12-06 20:33:20 -0800121static struct kmem_cache *_mpio_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -0700123static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
David Howellsc4028952006-11-22 14:57:56 +0000124static void process_queued_ios(struct work_struct *work);
125static void trigger_event(struct work_struct *work);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -0700126static void activate_path(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127
128
129/*-----------------------------------------------
130 * Allocation routines
131 *-----------------------------------------------*/
132
133static struct pgpath *alloc_pgpath(void)
134{
Michał Mirosławe69fae52006-10-03 01:15:34 -0700135	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136
Mike Anderson224cb3e2008-08-29 09:36:09 +0200137 if (pgpath) {
Kiyoshi Ueda66800732008-10-10 13:36:58 +0100138 pgpath->is_active = 1;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000139 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
Mike Anderson224cb3e2008-08-29 09:36:09 +0200140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
142 return pgpath;
143}
144
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100145static void free_pgpath(struct pgpath *pgpath)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146{
147 kfree(pgpath);
148}
149
150static struct priority_group *alloc_priority_group(void)
151{
152 struct priority_group *pg;
153
Michał Mirosławe69fae52006-10-03 01:15:34 -0700154	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155
Michał Mirosławe69fae52006-10-03 01:15:34 -0700156	if (pg)
157 INIT_LIST_HEAD(&pg->pgpaths);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
159 return pg;
160}
161
162static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
163{
164 struct pgpath *pgpath, *tmp;
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700165 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
167 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
168 list_del(&pgpath->list);
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700169 if (m->hw_handler_name)
170 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 dm_put_device(ti, pgpath->path.dev);
172 free_pgpath(pgpath);
173 }
174}
175
176static void free_priority_group(struct priority_group *pg,
177 struct dm_target *ti)
178{
179 struct path_selector *ps = &pg->ps;
180
181 if (ps->type) {
182 ps->type->destroy(ps);
183 dm_put_path_selector(ps->type);
184 }
185
186 free_pgpaths(&pg->pgpaths, ti);
187 kfree(pg);
188}
189
Michał Mirosław28f16c22006-10-03 01:15:33 -0700190static struct multipath *alloc_multipath(struct dm_target *ti)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
192 struct multipath *m;
Mike Snitzerf4790822013-09-12 18:06:12 -0400193 unsigned min_ios = dm_get_reserved_rq_based_ios();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194
Michał Mirosławe69fae52006-10-03 01:15:34 -0700195	m = kzalloc(sizeof(*m), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 if (m) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 INIT_LIST_HEAD(&m->priority_groups);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100198 INIT_LIST_HEAD(&m->queued_ios);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199 spin_lock_init(&m->lock);
200 m->queue_io = 1;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000201 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
David Howellsc4028952006-11-22 14:57:56 +0000202 INIT_WORK(&m->process_queued_ios, process_queued_ios);
203 INIT_WORK(&m->trigger_event, trigger_event);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000204 init_waitqueue_head(&m->pg_init_wait);
Mike Anderson6380f262009-12-10 23:52:21 +0000205 mutex_init(&m->work_mutex);
Mike Snitzerf4790822013-09-12 18:06:12 -0400206 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207 if (!m->mpio_pool) {
208 kfree(m);
209 return NULL;
210 }
Michał Mirosław28f16c22006-10-03 01:15:33 -0700211	m->ti = ti;
212 ti->private = m;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 }
214
215 return m;
216}
217
218static void free_multipath(struct multipath *m)
219{
220 struct priority_group *pg, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221
222 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
223 list_del(&pg->list);
224 free_priority_group(pg, m->ti);
225 }
226
Chandra Seetharamancfae5c92008-05-01 14:50:11 -0700227 kfree(m->hw_handler_name);
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700228 kfree(m->hw_handler_params);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229 mempool_destroy(m->mpio_pool);
230 kfree(m);
231}
232
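/*
 * Allocate the per-request multipath context (struct dm_mpath_io) from
 * the mempool and attach it to the request's map_info.
 */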
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100233static int set_mapinfo(struct multipath *m, union map_info *info)
234{
235 struct dm_mpath_io *mpio;
236
237 mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
238 if (!mpio)
239 return -ENOMEM;
240
241 memset(mpio, 0, sizeof(*mpio));
242 info->ptr = mpio;
243
244 return 0;
245}
246
247static void clear_mapinfo(struct multipath *m, union map_info *info)
248{
249 struct dm_mpath_io *mpio = info->ptr;
250
251 info->ptr = NULL;
252 mempool_free(mpio, m->mpio_pool);
253}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
255/*-----------------------------------------------
256 * Path selection
257 *-----------------------------------------------*/
258
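/*
 * Queue (possibly delayed) activate_path work for every active path in
 * the current priority group.  Called with m->lock held.
 */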
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000259static void __pg_init_all_paths(struct multipath *m)
260{
261 struct pgpath *pgpath;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000262 unsigned long pg_init_delay = 0;
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000263
264 m->pg_init_count++;
265 m->pg_init_required = 0;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000266 if (m->pg_init_delay_retry)
267 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
268 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000269 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
270 /* Skip failed paths */
271 if (!pgpath->is_active)
272 continue;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000273 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
274 pg_init_delay))
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000275 m->pg_init_in_progress++;
276 }
277}
278
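/*
 * Make the given path's priority group current.  If a hardware handler
 * is configured the new PG needs pg_init first, so I/O is queued until
 * that completes.
 */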
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
280{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281 m->current_pg = pgpath->pg;
282
283 /* Must we initialise the PG first, and queue I/O till it's ready? */
Chandra Seetharamancfae5c92008-05-01 14:50:11 -0700284 if (m->hw_handler_name) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 m->pg_init_required = 1;
286 m->queue_io = 1;
287 } else {
288 m->pg_init_required = 0;
289 m->queue_io = 0;
290 }
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100291
292 m->pg_init_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293}
294
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100295static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
296 size_t nr_bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297{
Josef "Jeff" Sipekc922d5f2006-12-08 02:36:33 -0800298 struct dm_path *path;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100300 path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301 if (!path)
302 return -ENXIO;
303
304 m->current_pgpath = path_to_pgpath(path);
305
306 if (m->current_pg != pg)
307 __switch_pg(m, m->current_pgpath);
308
309 return 0;
310}
311
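/*
 * Select a new current path, honouring any requested PG switch and
 * trying bypassed PGs only as a last resort.  Called with m->lock held.
 */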
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100312static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313{
314 struct priority_group *pg;
315 unsigned bypassed = 1;
316
317 if (!m->nr_valid_paths)
318 goto failed;
319
320 /* Were we instructed to switch PG? */
321 if (m->next_pg) {
322 pg = m->next_pg;
323 m->next_pg = NULL;
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100324 if (!__choose_path_in_pg(m, pg, nr_bytes))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 return;
326 }
327
328 /* Don't change PG until it has no remaining paths */
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100329 if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 return;
331
332 /*
333 * Loop through priority groups until we find a valid path.
334 * First time we skip PGs marked 'bypassed'.
Mike Christief220fd42012-06-03 00:29:45 +0100335 * Second time we only try the ones we skipped, but set
336 * pg_init_delay_retry so we do not hammer controllers.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 */
338 do {
339 list_for_each_entry(pg, &m->priority_groups, list) {
340 if (pg->bypassed == bypassed)
341 continue;
Mike Christief220fd42012-06-03 00:29:45 +0100342 if (!__choose_path_in_pg(m, pg, nr_bytes)) {
343 if (!bypassed)
344 m->pg_init_delay_retry = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 return;
Mike Christief220fd42012-06-03 00:29:45 +0100346 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347 }
348 } while (bypassed--);
349
350failed:
351 m->current_pgpath = NULL;
352 m->current_pg = NULL;
353}
354
Kiyoshi Ueda45e15722006-12-08 02:41:10 -0800355/*
356 * Check whether bios must be queued in the device-mapper core rather
357 * than here in the target.
358 *
359 * m->lock must be held on entry.
360 *
361 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
362 * same value then we are not between multipath_presuspend()
363 * and multipath_resume() calls and we have no need to check
364 * for the DMF_NOFLUSH_SUSPENDING flag.
365 */
366static int __must_push_back(struct multipath *m)
367{
368 return (m->queue_if_no_path != m->saved_queue_if_no_path &&
369 dm_noflush_suspending(m->ti));
370}
371
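/*
 * Map a cloned request to the current path: remap it to the path's
 * request queue, queue it internally for later resubmission, push it
 * back to the dm core, or fail it with -EIO.
 */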
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100372static int map_io(struct multipath *m, struct request *clone,
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100373 union map_info *map_context, unsigned was_queued)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374{
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -0800375 int r = DM_MAPIO_REMAPPED;
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100376 size_t nr_bytes = blk_rq_bytes(clone);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 unsigned long flags;
378 struct pgpath *pgpath;
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100379 struct block_device *bdev;
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100380 struct dm_mpath_io *mpio = map_context->ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381
382 spin_lock_irqsave(&m->lock, flags);
383
384 /* Do we need to select a new pgpath? */
385 if (!m->current_pgpath ||
386 (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100387 __choose_pgpath(m, nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388
389 pgpath = m->current_pgpath;
390
391 if (was_queued)
392 m->queue_size--;
393
Hannes Reineckeb63349a2013-10-01 11:49:56 +0200394 if (m->pg_init_required) {
395 if (!m->pg_init_in_progress)
396 queue_work(kmultipathd, &m->process_queued_ios);
397 r = DM_MAPIO_REQUEUE;
398 } else if ((pgpath && m->queue_io) ||
399 (!pgpath && m->queue_if_no_path)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 /* Queue for the daemon to resubmit */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100401 list_add_tail(&clone->queuelist, &m->queued_ios);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 m->queue_size++;
Hannes Reineckeb63349a2013-10-01 11:49:56 +0200403 if (!m->queue_io)
Alasdair G Kergonc5573082005-05-05 16:16:07 -0700404 queue_work(kmultipathd, &m->process_queued_ios);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 pgpath = NULL;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -0800406 r = DM_MAPIO_SUBMITTED;
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100407 } else if (pgpath) {
408 bdev = pgpath->path.dev->bdev;
409 clone->q = bdev_get_queue(bdev);
410 clone->rq_disk = bdev->bd_disk;
411 } else if (__must_push_back(m))
Kiyoshi Ueda45e15722006-12-08 02:41:10 -0800412 r = DM_MAPIO_REQUEUE;
413 else
414 r = -EIO; /* Failed */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415
416 mpio->pgpath = pgpath;
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100417 mpio->nr_bytes = nr_bytes;
418
419 if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
420 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
421 nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422
423 spin_unlock_irqrestore(&m->lock, flags);
424
425 return r;
426}
427
428/*
429 * If we run out of usable paths, should we queue I/O or error it?
430 */
Alasdair G Kergon485ef692005-09-27 21:45:45 -0700431static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
432 unsigned save_old_value)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433{
434 unsigned long flags;
435
436 spin_lock_irqsave(&m->lock, flags);
437
Alasdair G Kergon485ef692005-09-27 21:45:45 -0700438 if (save_old_value)
439 m->saved_queue_if_no_path = m->queue_if_no_path;
440 else
441 m->saved_queue_if_no_path = queue_if_no_path;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442 m->queue_if_no_path = queue_if_no_path;
Alasdair G Kergonc3cd4f62005-07-12 15:53:04 -0700443 if (!m->queue_if_no_path && m->queue_size)
Alasdair G Kergonc5573082005-05-05 16:16:07 -0700444 queue_work(kmultipathd, &m->process_queued_ios);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445
446 spin_unlock_irqrestore(&m->lock, flags);
447
448 return 0;
449}
450
451/*-----------------------------------------------------------------
452 * The multipath daemon is responsible for resubmitting queued ios.
453 *---------------------------------------------------------------*/
454
455static void dispatch_queued_ios(struct multipath *m)
456{
457 int r;
458 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 union map_info *info;
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100460 struct request *clone, *n;
461 LIST_HEAD(cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462
463 spin_lock_irqsave(&m->lock, flags);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100464 list_splice_init(&m->queued_ios, &cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 spin_unlock_irqrestore(&m->lock, flags);
466
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100467 list_for_each_entry_safe(clone, n, &cl, queuelist) {
468 list_del_init(&clone->queuelist);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100470 info = dm_get_rq_mapinfo(clone);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100472 r = map_io(m, clone, info, 1);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100473 if (r < 0) {
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100474 clear_mapinfo(m, info);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100475 dm_kill_unmapped_request(clone, r);
476 } else if (r == DM_MAPIO_REMAPPED)
477 dm_dispatch_request(clone);
478 else if (r == DM_MAPIO_REQUEUE) {
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100479 clear_mapinfo(m, info);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100480 dm_requeue_unmapped_request(clone);
481 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482 }
483}
484
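/*
 * Work function: choose a path if none is current, start pg_init when
 * required (and not disabled), then dispatch queued I/O that no longer
 * has to wait.
 */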
David Howellsc4028952006-11-22 14:57:56 +0000485static void process_queued_ios(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486{
David Howellsc4028952006-11-22 14:57:56 +0000487 struct multipath *m =
488 container_of(work, struct multipath, process_queued_ios);
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000489 struct pgpath *pgpath = NULL;
Chandra Seetharamane54f77d2009-06-22 10:12:12 +0100490 unsigned must_queue = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 unsigned long flags;
492
493 spin_lock_irqsave(&m->lock, flags);
494
495 if (!m->current_pgpath)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +0100496 __choose_pgpath(m, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 pgpath = m->current_pgpath;
499
Alasdair G Kergonc3cd4f62005-07-12 15:53:04 -0700500 if ((pgpath && !m->queue_io) ||
501 (!pgpath && !m->queue_if_no_path))
502 must_queue = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +0000504 if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
505 !m->pg_init_disabled)
Kiyoshi Uedafb612642010-03-06 02:32:18 +0000506 __pg_init_all_paths(m);
507
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509 if (!must_queue)
510 dispatch_queued_ios(m);
511}
512
513/*
514 * An event is triggered whenever a path is taken out of use.
515 * Includes path failure and PG bypass.
516 */
David Howellsc4028952006-11-22 14:57:56 +0000517static void trigger_event(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518{
David Howellsc4028952006-11-22 14:57:56 +0000519 struct multipath *m =
520 container_of(work, struct multipath, trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521
522 dm_table_event(m->ti->table);
523}
524
525/*-----------------------------------------------------------------
526 * Constructor/argument parsing:
527 * <#multipath feature args> [<arg>]*
528 * <#hw_handler args> [hw_handler [<arg>]*]
529 * <#priority groups>
530 * <initial priority group>
531 * [<selector> <#selector args> [<arg>]*
532 * <#paths> <#per-path selector args>
533 * [<path> [<arg>]* ]+ ]+
534 *---------------------------------------------------------------*/
Mike Snitzer498f0102011-08-02 12:32:04 +0100535static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 struct dm_target *ti)
537{
538 int r;
539 struct path_selector_type *pst;
540 unsigned ps_argc;
541
Mike Snitzer498f0102011-08-02 12:32:04 +0100542 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700543 {0, 1024, "invalid number of path selector args"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 };
545
Mike Snitzer498f0102011-08-02 12:32:04 +0100546 pst = dm_get_path_selector(dm_shift_arg(as));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 if (!pst) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700548 ti->error = "unknown path selector type";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 return -EINVAL;
550 }
551
Mike Snitzer498f0102011-08-02 12:32:04 +0100552 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
Mikulas Patocka371b2e32008-07-21 12:00:24 +0100553 if (r) {
554 dm_put_path_selector(pst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 return -EINVAL;
Mikulas Patocka371b2e32008-07-21 12:00:24 +0100556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557
558 r = pst->create(&pg->ps, ps_argc, as->argv);
559 if (r) {
560 dm_put_path_selector(pst);
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700561 ti->error = "path selector constructor failed";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 return r;
563 }
564
565 pg->ps.type = pst;
Mike Snitzer498f0102011-08-02 12:32:04 +0100566 dm_consume_args(as, ps_argc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
568 return 0;
569}
570
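/*
 * Parse one path argument: open the device, attach (or adopt) the
 * hardware handler if one is configured, and register the path with
 * the priority group's path selector.
 */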
Mike Snitzer498f0102011-08-02 12:32:04 +0100571static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 struct dm_target *ti)
573{
574 int r;
575 struct pgpath *p;
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700576 struct multipath *m = ti->private;
Mike Snitzera58a9352012-07-27 15:08:04 +0100577 struct request_queue *q = NULL;
578 const char *attached_handler_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579
580 /* we need at least a path arg */
581 if (as->argc < 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700582 ti->error = "no device given";
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100583 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 }
585
586 p = alloc_pgpath();
587 if (!p)
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100588 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
Mike Snitzer498f0102011-08-02 12:32:04 +0100590 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
Nikanth Karthikesan8215d6e2010-03-06 02:32:27 +0000591 &p->path.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 if (r) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700593 ti->error = "error getting device";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 goto bad;
595 }
596
Mike Snitzera58a9352012-07-27 15:08:04 +0100597 if (m->retain_attached_hw_handler || m->hw_handler_name)
598 q = bdev_get_queue(p->path.dev->bdev);
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100599
Mike Snitzera58a9352012-07-27 15:08:04 +0100600 if (m->retain_attached_hw_handler) {
601 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
602 if (attached_handler_name) {
603 /*
604 * Reset hw_handler_name to match the attached handler
605 * and clear any hw_handler_params associated with the
606 * ignored handler.
607 *
608 * NB. This modifies the table line to show the actual
609 * handler instead of the original table passed in.
610 */
611 kfree(m->hw_handler_name);
612 m->hw_handler_name = attached_handler_name;
613
614 kfree(m->hw_handler_params);
615 m->hw_handler_params = NULL;
616 }
617 }
618
619 if (m->hw_handler_name) {
620 /*
621 * Increments scsi_dh reference, even when using an
622 * already-attached handler.
623 */
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100624 r = scsi_dh_attach(q, m->hw_handler_name);
625 if (r == -EBUSY) {
626 /*
Mike Snitzera58a9352012-07-27 15:08:04 +0100627 * Already attached to different hw_handler:
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100628 * try to reattach with correct one.
629 */
630 scsi_dh_detach(q);
631 r = scsi_dh_attach(q, m->hw_handler_name);
632 }
633
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700634 if (r < 0) {
Hannes Reineckea0cf7ea2009-06-22 10:12:11 +0100635 ti->error = "error attaching hardware handler";
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700636 dm_put_device(ti, p->path.dev);
637 goto bad;
638 }
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700639
640 if (m->hw_handler_params) {
641 r = scsi_dh_set_params(q, m->hw_handler_params);
642 if (r < 0) {
643 ti->error = "unable to set hardware "
644 "handler parameters";
645 scsi_dh_detach(q);
646 dm_put_device(ti, p->path.dev);
647 goto bad;
648 }
649 }
Hannes Reineckeae11b1b2008-07-17 17:49:02 -0700650 }
651
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
653 if (r) {
654 dm_put_device(ti, p->path.dev);
655 goto bad;
656 }
657
658 return p;
659
660 bad:
661 free_pgpath(p);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100662 return ERR_PTR(r);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663}
664
Mike Snitzer498f0102011-08-02 12:32:04 +0100665static struct priority_group *parse_priority_group(struct dm_arg_set *as,
Michał Mirosław28f16c22006-10-03 01:15:33 -0700666 struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667{
Mike Snitzer498f0102011-08-02 12:32:04 +0100668 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700669 {1, 1024, "invalid number of paths"},
670 {0, 1024, "invalid number of selector args"}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 };
672
673 int r;
Mike Snitzer498f0102011-08-02 12:32:04 +0100674 unsigned i, nr_selector_args, nr_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 struct priority_group *pg;
Michał Mirosław28f16c22006-10-03 01:15:33 -0700676	struct dm_target *ti = m->ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677
678 if (as->argc < 2) {
679 as->argc = 0;
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100680 ti->error = "not enough priority group arguments";
681 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 }
683
684 pg = alloc_priority_group();
685 if (!pg) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700686 ti->error = "couldn't allocate priority group";
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100687 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 }
689 pg->m = m;
690
691 r = parse_path_selector(as, pg, ti);
692 if (r)
693 goto bad;
694
695 /*
696 * read the paths
697 */
Mike Snitzer498f0102011-08-02 12:32:04 +0100698 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699 if (r)
700 goto bad;
701
Mike Snitzer498f0102011-08-02 12:32:04 +0100702 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 if (r)
704 goto bad;
705
Mike Snitzer498f0102011-08-02 12:32:04 +0100706 nr_args = 1 + nr_selector_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707 for (i = 0; i < pg->nr_pgpaths; i++) {
708 struct pgpath *pgpath;
Mike Snitzer498f0102011-08-02 12:32:04 +0100709 struct dm_arg_set path_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710
Mike Snitzer498f0102011-08-02 12:32:04 +0100711 if (as->argc < nr_args) {
Mikulas Patocka148acff2008-07-21 12:00:30 +0100712 ti->error = "not enough path parameters";
Alasdair G Kergon6bbf79a2010-08-12 04:13:49 +0100713 r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714 goto bad;
Mikulas Patocka148acff2008-07-21 12:00:30 +0100715 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
Mike Snitzer498f0102011-08-02 12:32:04 +0100717 path_args.argc = nr_args;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 path_args.argv = as->argv;
719
720 pgpath = parse_path(&path_args, &pg->ps, ti);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100721 if (IS_ERR(pgpath)) {
722 r = PTR_ERR(pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 goto bad;
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100724 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725
726 pgpath->pg = pg;
727 list_add_tail(&pgpath->list, &pg->pgpaths);
Mike Snitzer498f0102011-08-02 12:32:04 +0100728 dm_consume_args(as, nr_args);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 }
730
731 return pg;
732
733 bad:
734 free_priority_group(pg, ti);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100735 return ERR_PTR(r);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736}
737
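/*
 * Parse the hardware handler argument group: load the named scsi_dh
 * module and save any handler parameters for scsi_dh_set_params().
 */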
Mike Snitzer498f0102011-08-02 12:32:04 +0100738static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740 unsigned hw_argc;
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700741 int ret;
Michał Mirosław28f16c22006-10-03 01:15:33 -0700742	struct dm_target *ti = m->ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743
Mike Snitzer498f0102011-08-02 12:32:04 +0100744 static struct dm_arg _args[] = {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700745 {0, 1024, "invalid number of hardware handler args"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 };
747
Mike Snitzer498f0102011-08-02 12:32:04 +0100748 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 return -EINVAL;
750
751 if (!hw_argc)
752 return 0;
753
Mike Snitzer498f0102011-08-02 12:32:04 +0100754 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
Mike Snitzer510193a2012-05-12 01:43:21 +0100755 if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
756 "scsi_dh_%s", m->hw_handler_name)) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700757 ti->error = "unknown hardware handler type";
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700758 ret = -EINVAL;
759 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760 }
Chandra Seetharaman14e98c52008-11-13 23:39:06 +0000761
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700762 if (hw_argc > 1) {
763 char *p;
764 int i, j, len = 4;
765
766 for (i = 0; i <= hw_argc - 2; i++)
767 len += strlen(as->argv[i]) + 1;
768 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
769 if (!p) {
770 ti->error = "memory allocation failed";
771 ret = -ENOMEM;
772 goto fail;
773 }
774 j = sprintf(p, "%d", hw_argc - 1);
775 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
776 j = sprintf(p, "%s", as->argv[i]);
777 }
Mike Snitzer498f0102011-08-02 12:32:04 +0100778 dm_consume_args(as, hw_argc - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
780 return 0;
Chandra Seetharaman2bfd2e12009-08-03 12:42:45 -0700781fail:
782 kfree(m->hw_handler_name);
783 m->hw_handler_name = NULL;
784 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785}
786
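/*
 * Parse the feature argument group: queue_if_no_path,
 * retain_attached_hw_handler, pg_init_retries and pg_init_delay_msecs.
 */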
Mike Snitzer498f0102011-08-02 12:32:04 +0100787static int parse_features(struct dm_arg_set *as, struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788{
789 int r;
790 unsigned argc;
Michał Mirosław28f16c22006-10-03 01:15:33 -0700791	struct dm_target *ti = m->ti;
Mike Snitzer498f0102011-08-02 12:32:04 +0100792 const char *arg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793
Mike Snitzer498f0102011-08-02 12:32:04 +0100794 static struct dm_arg _args[] = {
Mike Snitzera58a9352012-07-27 15:08:04 +0100795 {0, 6, "invalid number of feature args"},
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100796 {1, 50, "pg_init_retries must be between 1 and 50"},
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000797 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 };
799
Mike Snitzer498f0102011-08-02 12:32:04 +0100800 r = dm_read_arg_group(_args, as, &argc, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 if (r)
802 return -EINVAL;
803
804 if (!argc)
805 return 0;
806
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100807 do {
Mike Snitzer498f0102011-08-02 12:32:04 +0100808 arg_name = dm_shift_arg(as);
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100809 argc--;
810
Mike Snitzer498f0102011-08-02 12:32:04 +0100811 if (!strcasecmp(arg_name, "queue_if_no_path")) {
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100812 r = queue_if_no_path(m, 1, 0);
813 continue;
814 }
815
Mike Snitzera58a9352012-07-27 15:08:04 +0100816 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
817 m->retain_attached_hw_handler = 1;
818 continue;
819 }
820
Mike Snitzer498f0102011-08-02 12:32:04 +0100821 if (!strcasecmp(arg_name, "pg_init_retries") &&
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100822 (argc >= 1)) {
Mike Snitzer498f0102011-08-02 12:32:04 +0100823 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100824 argc--;
825 continue;
826 }
827
Mike Snitzer498f0102011-08-02 12:32:04 +0100828 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000829 (argc >= 1)) {
Mike Snitzer498f0102011-08-02 12:32:04 +0100830 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +0000831 argc--;
832 continue;
833 }
834
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 ti->error = "Unrecognised multipath feature request";
Dave Wysochanskic9e45582007-10-19 22:47:53 +0100836 r = -EINVAL;
837 } while (argc && !r);
838
839 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840}
841
842static int multipath_ctr(struct dm_target *ti, unsigned int argc,
843 char **argv)
844{
Mike Snitzer498f0102011-08-02 12:32:04 +0100845 /* target arguments */
846 static struct dm_arg _args[] = {
Mike Snitzera490a072011-03-24 13:54:33 +0000847 {0, 1024, "invalid number of priority groups"},
848 {0, 1024, "invalid initial priority group number"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 };
850
851 int r;
852 struct multipath *m;
Mike Snitzer498f0102011-08-02 12:32:04 +0100853 struct dm_arg_set as;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 unsigned pg_count = 0;
855 unsigned next_pg_num;
856
857 as.argc = argc;
858 as.argv = argv;
859
Michał Mirosław28f16c22006-10-03 01:15:33 -0700860	m = alloc_multipath(ti);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861 if (!m) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700862 ti->error = "can't allocate multipath";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 return -EINVAL;
864 }
865
Michał Mirosław28f16c22006-10-03 01:15:33 -0700866	r = parse_features(&as, m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 if (r)
868 goto bad;
869
Michał Mirosław28f16c22006-10-03 01:15:33 -0700870	r = parse_hw_handler(&as, m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 if (r)
872 goto bad;
873
Mike Snitzer498f0102011-08-02 12:32:04 +0100874 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 if (r)
876 goto bad;
877
Mike Snitzer498f0102011-08-02 12:32:04 +0100878 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 if (r)
880 goto bad;
881
Mike Snitzera490a072011-03-24 13:54:33 +0000882 if ((!m->nr_priority_groups && next_pg_num) ||
883 (m->nr_priority_groups && !next_pg_num)) {
884 ti->error = "invalid initial priority group";
885 r = -EINVAL;
886 goto bad;
887 }
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 /* parse the priority groups */
890 while (as.argc) {
891 struct priority_group *pg;
892
Michał Mirosław28f16c22006-10-03 01:15:33 -0700893		pg = parse_priority_group(&as, m);
Benjamin Marzinski01460f32008-10-10 13:36:57 +0100894 if (IS_ERR(pg)) {
895 r = PTR_ERR(pg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 goto bad;
897 }
898
899 m->nr_valid_paths += pg->nr_pgpaths;
900 list_add_tail(&pg->list, &m->priority_groups);
901 pg_count++;
902 pg->pg_num = pg_count;
903 if (!--next_pg_num)
904 m->next_pg = pg;
905 }
906
907 if (pg_count != m->nr_priority_groups) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700908 ti->error = "priority group count mismatch";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 r = -EINVAL;
910 goto bad;
911 }
912
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +0000913 ti->num_flush_bios = 1;
914 ti->num_discard_bios = 1;
Mike Snitzer042bcef2013-05-10 14:37:16 +0100915 ti->num_write_same_bios = 1;
Mikulas Patocka86279212009-06-22 10:12:24 +0100916
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 return 0;
918
919 bad:
920 free_multipath(m);
921 return r;
922}
923
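/*
 * Sleep on pg_init_wait until no path activations remain in progress.
 */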
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000924static void multipath_wait_for_pg_init_completion(struct multipath *m)
925{
926 DECLARE_WAITQUEUE(wait, current);
927 unsigned long flags;
928
929 add_wait_queue(&m->pg_init_wait, &wait);
930
931 while (1) {
932 set_current_state(TASK_UNINTERRUPTIBLE);
933
934 spin_lock_irqsave(&m->lock, flags);
935 if (!m->pg_init_in_progress) {
936 spin_unlock_irqrestore(&m->lock, flags);
937 break;
938 }
939 spin_unlock_irqrestore(&m->lock, flags);
940
941 io_schedule();
942 }
943 set_current_state(TASK_RUNNING);
944
945 remove_wait_queue(&m->pg_init_wait, &wait);
946}
947
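/*
 * Temporarily disable pg_init and flush all outstanding multipath work:
 * path activation, queued I/O processing and table events.
 */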
948static void flush_multipath_work(struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949{
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +0000950 unsigned long flags;
951
952 spin_lock_irqsave(&m->lock, flags);
953 m->pg_init_disabled = 1;
954 spin_unlock_irqrestore(&m->lock, flags);
955
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -0700956 flush_workqueue(kmpath_handlerd);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000957 multipath_wait_for_pg_init_completion(m);
Alasdair G Kergona044d012005-07-12 15:53:02 -0700958 flush_workqueue(kmultipathd);
Tejun Heo43829732012-08-20 14:51:24 -0700959 flush_work(&m->trigger_event);
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +0000960
961 spin_lock_irqsave(&m->lock, flags);
962 m->pg_init_disabled = 0;
963 spin_unlock_irqrestore(&m->lock, flags);
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +0000964}
965
966static void multipath_dtr(struct dm_target *ti)
967{
968 struct multipath *m = ti->private;
969
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +0000970 flush_multipath_work(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 free_multipath(m);
972}
973
974/*
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100975 * Map cloned requests
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100977static int multipath_map(struct dm_target *ti, struct request *clone,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 union map_info *map_context)
979{
980 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 struct multipath *m = (struct multipath *) ti->private;
982
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100983 if (set_mapinfo(m, map_context) < 0)
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100984 /* ENOMEM, requeue */
985 return DM_MAPIO_REQUEUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +0100987 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100988 r = map_io(m, clone, map_context, 0);
Kiyoshi Ueda45e15722006-12-08 02:41:10 -0800989 if (r < 0 || r == DM_MAPIO_REQUEUE)
Jun'ichi Nomura466891f2012-03-28 18:41:25 +0100990 clear_mapinfo(m, map_context);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
992 return r;
993}
994
995/*
996 * Take a path out of use.
997 */
998static int fail_path(struct pgpath *pgpath)
999{
1000 unsigned long flags;
1001 struct multipath *m = pgpath->pg->m;
1002
1003 spin_lock_irqsave(&m->lock, flags);
1004
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001005 if (!pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 goto out;
1007
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001008 DMWARN("Failing path %s.", pgpath->path.dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009
1010 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001011 pgpath->is_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 pgpath->fail_count++;
1013
1014 m->nr_valid_paths--;
1015
1016 if (pgpath == m->current_pgpath)
1017 m->current_pgpath = NULL;
1018
Mike Andersonb15546f2007-10-19 22:48:02 +01001019 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1020 pgpath->path.dev->name, m->nr_valid_paths);
1021
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001022 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023
1024out:
1025 spin_unlock_irqrestore(&m->lock, flags);
1026
1027 return 0;
1028}
1029
1030/*
1031 * Reinstate a previously-failed path
1032 */
1033static int reinstate_path(struct pgpath *pgpath)
1034{
1035 int r = 0;
1036 unsigned long flags;
1037 struct multipath *m = pgpath->pg->m;
1038
1039 spin_lock_irqsave(&m->lock, flags);
1040
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001041 if (pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 goto out;
1043
Alasdair G Kergondef052d2008-07-21 12:00:31 +01001044 if (!pgpath->pg->ps.type->reinstate_path) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 DMWARN("Reinstate path not supported by path selector %s",
1046 pgpath->pg->ps.type->name);
1047 r = -EINVAL;
1048 goto out;
1049 }
1050
1051 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1052 if (r)
1053 goto out;
1054
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001055 pgpath->is_active = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001057 if (!m->nr_valid_paths++ && m->queue_size) {
1058 m->current_pgpath = NULL;
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001059 queue_work(kmultipathd, &m->process_queued_ios);
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001060 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001061 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001062 m->pg_init_in_progress++;
1063 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064
Mike Andersonb15546f2007-10-19 22:48:02 +01001065 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1066 pgpath->path.dev->name, m->nr_valid_paths);
1067
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001068 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069
1070out:
1071 spin_unlock_irqrestore(&m->lock, flags);
1072
1073 return r;
1074}
1075
1076/*
1077 * Fail or reinstate all paths that match the provided struct dm_dev.
1078 */
1079static int action_dev(struct multipath *m, struct dm_dev *dev,
1080 action_fn action)
1081{
Mike Snitzer19040c02011-03-24 13:54:31 +00001082 int r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 struct pgpath *pgpath;
1084 struct priority_group *pg;
1085
1086 list_for_each_entry(pg, &m->priority_groups, list) {
1087 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1088 if (pgpath->path.dev == dev)
1089 r = action(pgpath);
1090 }
1091 }
1092
1093 return r;
1094}
1095
1096/*
1097 * Temporarily try to avoid having to use the specified PG
1098 */
1099static void bypass_pg(struct multipath *m, struct priority_group *pg,
1100 int bypassed)
1101{
1102 unsigned long flags;
1103
1104 spin_lock_irqsave(&m->lock, flags);
1105
1106 pg->bypassed = bypassed;
1107 m->current_pgpath = NULL;
1108 m->current_pg = NULL;
1109
1110 spin_unlock_irqrestore(&m->lock, flags);
1111
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001112 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113}
1114
1115/*
1116 * Switch to using the specified PG from the next I/O that gets mapped
1117 */
1118static int switch_pg_num(struct multipath *m, const char *pgstr)
1119{
1120 struct priority_group *pg;
1121 unsigned pgnum;
1122 unsigned long flags;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001123 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001125 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 (pgnum > m->nr_priority_groups)) {
1127 DMWARN("invalid PG number supplied to switch_pg_num");
1128 return -EINVAL;
1129 }
1130
1131 spin_lock_irqsave(&m->lock, flags);
1132 list_for_each_entry(pg, &m->priority_groups, list) {
1133 pg->bypassed = 0;
1134 if (--pgnum)
1135 continue;
1136
1137 m->current_pgpath = NULL;
1138 m->current_pg = NULL;
1139 m->next_pg = pg;
1140 }
1141 spin_unlock_irqrestore(&m->lock, flags);
1142
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001143 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 return 0;
1145}
1146
1147/*
1148 * Set/clear bypassed status of a PG.
1149 * PGs are numbered upwards from 1 in the order they were declared.
1150 */
1151static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1152{
1153 struct priority_group *pg;
1154 unsigned pgnum;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001155 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Mikulas Patocka31998ef2012-03-28 18:41:26 +01001157 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 (pgnum > m->nr_priority_groups)) {
1159 DMWARN("invalid PG number supplied to bypass_pg");
1160 return -EINVAL;
1161 }
1162
1163 list_for_each_entry(pg, &m->priority_groups, list) {
1164 if (!--pgnum)
1165 break;
1166 }
1167
1168 bypass_pg(m, pg, bypassed);
1169 return 0;
1170}
1171
1172/*
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001173 * Should we retry pg_init immediately?
1174 */
1175static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1176{
1177 unsigned long flags;
1178 int limit_reached = 0;
1179
1180 spin_lock_irqsave(&m->lock, flags);
1181
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +00001182 if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001183 m->pg_init_required = 1;
1184 else
1185 limit_reached = 1;
1186
1187 spin_unlock_irqrestore(&m->lock, flags);
1188
1189 return limit_reached;
1190}
1191
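/*
 * Completion callback for scsi_dh_activate().  Depending on the scsi_dh
 * result the path is failed, the PG bypassed, or a retry scheduled;
 * once the last outstanding activation finishes, queued I/O is
 * restarted and any suspend waiter is woken.
 */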
Chandra Seetharaman3ae31f62009-10-21 09:22:46 -07001192static void pg_init_done(void *data, int errors)
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001193{
Moger, Babu83c0d5d2010-03-06 02:29:45 +00001194 struct pgpath *pgpath = data;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001195 struct priority_group *pg = pgpath->pg;
1196 struct multipath *m = pg->m;
1197 unsigned long flags;
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001198 unsigned delay_retry = 0;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001199
1200 /* device or driver problems */
1201 switch (errors) {
1202 case SCSI_DH_OK:
1203 break;
1204 case SCSI_DH_NOSYS:
1205 if (!m->hw_handler_name) {
1206 errors = 0;
1207 break;
1208 }
Moger, Babuf7b934c2010-03-06 02:29:49 +00001209 DMERR("Could not failover the device: Handler scsi_dh_%s "
1210 "Error %d.", m->hw_handler_name, errors);
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001211 /*
1212 * Fail path for now, so we do not ping pong
1213 */
1214 fail_path(pgpath);
1215 break;
1216 case SCSI_DH_DEV_TEMP_BUSY:
1217 /*
1218 * Probably doing something like FW upgrade on the
1219 * controller so try the other pg.
1220 */
1221 bypass_pg(m, pg, 1);
1222 break;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001223 case SCSI_DH_RETRY:
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001224 /* Wait before retrying. */
1225 delay_retry = 1;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001226 case SCSI_DH_IMM_RETRY:
1227 case SCSI_DH_RES_TEMP_UNAVAIL:
1228 if (pg_init_limit_reached(m, pgpath))
1229 fail_path(pgpath);
1230 errors = 0;
1231 break;
1232 default:
1233 /*
1234 * We probably do not want to fail the path for a device
1235 * error, but this is what the old dm did. In future
1236 * patches we can do more advanced handling.
1237 */
1238 fail_path(pgpath);
1239 }
1240
1241 spin_lock_irqsave(&m->lock, flags);
1242 if (errors) {
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001243 if (pgpath == m->current_pgpath) {
1244 DMERR("Could not failover device. Error %d.", errors);
1245 m->current_pgpath = NULL;
1246 m->current_pg = NULL;
1247 }
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001248 } else if (!m->pg_init_required)
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001249 pg->bypassed = 0;
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001250
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001251 if (--m->pg_init_in_progress)
 1252		/* Activations of other paths are still ongoing */
1253 goto out;
1254
1255 if (!m->pg_init_required)
1256 m->queue_io = 0;
1257
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001258 m->pg_init_delay_retry = delay_retry;
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001259 queue_work(kmultipathd, &m->process_queued_ios);
1260
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001261 /*
1262 * Wake up any thread waiting to suspend.
1263 */
1264 wake_up(&m->pg_init_wait);
1265
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001266out:
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001267 spin_unlock_irqrestore(&m->lock, flags);
1268}
1269
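/*
 * Work function: ask scsi_dh to activate the path; pg_init_done() runs
 * as the completion callback.
 */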
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001270static void activate_path(struct work_struct *work)
1271{
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001272 struct pgpath *pgpath =
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001273 container_of(work, struct pgpath, activate_path.work);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001274
Chandra Seetharaman3ae31f62009-10-21 09:22:46 -07001275 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
Moger, Babu83c0d5d2010-03-06 02:29:45 +00001276 pg_init_done, pgpath);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001277}
1278
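/*
 * Errors that indicate the I/O itself is bad rather than the path, so
 * retrying on another path will not help.
 */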
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001279static int noretry_error(int error)
1280{
1281 switch (error) {
1282 case -EOPNOTSUPP:
1283 case -EREMOTEIO:
1284 case -EILSEQ:
1285 case -ENODATA:
Jun'ichi Nomuracc9d3c32013-09-13 14:54:30 +09001286 case -ENOSPC:
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001287 return 1;
1288 }
1289
1290 /* Anything else could be a path failure, so should be retried */
1291 return 0;
1292}
1293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294/*
1295 * end_io handling
1296 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001297static int do_end_io(struct multipath *m, struct request *clone,
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001298 int error, struct dm_mpath_io *mpio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299{
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001300 /*
1301 * We don't queue any clone request inside the multipath target
1302 * during end I/O handling, since those clone requests don't have
1303 * bio clones. If we queue them inside the multipath target,
1304 * we need to make bio clones, that requires memory allocation.
1305 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1306 * don't have bio clones.)
1307 * Instead of queueing the clone request here, we queue the original
1308 * request into dm core, which will remake a clone request and
1309 * clone bios for it and resubmit it later.
1310 */
1311 int r = DM_ENDIO_REQUEUE;
Stefan Bader640eb3b2005-11-21 21:32:35 -08001312 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001314 if (!error && !clone->errors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 return 0; /* I/O complete */
1316
Mike Snitzerf84cb8a2013-09-19 12:13:58 -04001317 if (noretry_error(error)) {
1318 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1319 !clone->q->limits.max_write_same_sectors) {
1320 struct queue_limits *limits;
1321
1322 /* device doesn't really support WRITE SAME, disable it */
1323 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1324 limits->max_write_same_sectors = 0;
1325 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001326 return error;
Mike Snitzerf84cb8a2013-09-19 12:13:58 -04001327 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001328
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001329 if (mpio->pgpath)
1330 fail_path(mpio->pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Stefan Bader640eb3b2005-11-21 21:32:35 -08001332 spin_lock_irqsave(&m->lock, flags);
Hannes Reinecke751b2a72011-01-18 10:13:12 +01001333 if (!m->nr_valid_paths) {
1334 if (!m->queue_if_no_path) {
1335 if (!__must_push_back(m))
1336 r = -EIO;
1337 } else {
1338 if (error == -EBADE)
1339 r = error;
1340 }
1341 }
Stefan Bader640eb3b2005-11-21 21:32:35 -08001342 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001344 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345}
1346
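/*
 * Request-based end_io: run do_end_io(), report the completed bytes to
 * the path selector and release the per-request mpio context.
 */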
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001347static int multipath_end_io(struct dm_target *ti, struct request *clone,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 int error, union map_info *map_context)
1349{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001350 struct multipath *m = ti->private;
1351 struct dm_mpath_io *mpio = map_context->ptr;
Wei Yongjuna71a2612012-10-12 16:59:42 +01001352 struct pgpath *pgpath;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 struct path_selector *ps;
1354 int r;
1355
Jun'ichi Nomura466891f2012-03-28 18:41:25 +01001356 BUG_ON(!mpio);
1357
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001358 r = do_end_io(m, clone, error, mpio);
Wei Yongjuna71a2612012-10-12 16:59:42 +01001359 pgpath = mpio->pgpath;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 if (pgpath) {
1361 ps = &pgpath->pg->ps;
1362 if (ps->type->end_io)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +01001363 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 }
Jun'ichi Nomura466891f2012-03-28 18:41:25 +01001365 clear_mapinfo(m, map_context);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367 return r;
1368}
1369
1370/*
1371	 * Suspend can't complete until all the I/O is processed, so if
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001372 * the last path fails we must error any remaining I/O.
1373	 * Note that if freeze_bdev() fails while suspending, the
1374 * queue_if_no_path state is lost - userspace should reset it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 */
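/*
 * A note on the pairing below: queue_if_no_path(m, 0, 1) in presuspend
 * disables queueing but asks for the current setting to be saved, and
 * multipath_resume() later restores it from m->saved_queue_if_no_path.
 */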
1376static void multipath_presuspend(struct dm_target *ti)
1377{
1378 struct multipath *m = (struct multipath *) ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Alasdair G Kergon485ef692005-09-27 21:45:45 -07001380 queue_if_no_path(m, 0, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381}
1382
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001383static void multipath_postsuspend(struct dm_target *ti)
1384{
Mike Anderson6380f262009-12-10 23:52:21 +00001385 struct multipath *m = ti->private;
1386
1387 mutex_lock(&m->work_mutex);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001388 flush_multipath_work(m);
Mike Anderson6380f262009-12-10 23:52:21 +00001389 mutex_unlock(&m->work_mutex);
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001390}
1391
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001392/*
1393 * Restore the queue_if_no_path setting.
1394 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395static void multipath_resume(struct dm_target *ti)
1396{
1397 struct multipath *m = (struct multipath *) ti->private;
1398 unsigned long flags;
1399
1400 spin_lock_irqsave(&m->lock, flags);
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001401 m->queue_if_no_path = m->saved_queue_if_no_path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 spin_unlock_irqrestore(&m->lock, flags);
1403}
1404
1405/*
1406 * Info output has the following format:
1407 * num_multipath_feature_args [multipath_feature_args]*
1408 * num_handler_status_args [handler_status_args]*
1409 * num_groups init_group_number
1410 * [A|D|E num_ps_status_args [ps_status_args]*
1411 * num_paths num_selector_args
1412 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1413 *
1414 * Table output has the following format (identical to the constructor string):
1415 * num_feature_args [features_args]*
1416 * num_handler_args hw_handler [hw_handler_args]*
1417 * num_groups init_group_number
1418 * [priority selector-name num_ps_args [ps_args]*
1419 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1420 */
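/*
 * (Illustratively, the "info" form above is what userspace sees from
 *  "dmsetup status <device>" and the "table" form from
 *  "dmsetup table <device>"; the exact fields are assembled below.)
 */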
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001421static void multipath_status(struct dm_target *ti, status_type_t type,
1422 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423{
1424 int sz = 0;
1425 unsigned long flags;
1426 struct multipath *m = (struct multipath *) ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 struct priority_group *pg;
1428 struct pgpath *p;
1429 unsigned pg_num;
1430 char state;
1431
1432 spin_lock_irqsave(&m->lock, flags);
1433
1434 /* Features */
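	/*
	 * The leading count is the number of feature words emitted:
	 * queue_if_no_path and retain_attached_hw_handler are single
	 * keywords, while pg_init_retries and pg_init_delay_msecs each
	 * contribute a keyword plus a value, hence the "* 2" terms in
	 * the table branch below.
	 */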
1435 if (type == STATUSTYPE_INFO)
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001436 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
1437 else {
1438 DMEMIT("%u ", m->queue_if_no_path +
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001439 (m->pg_init_retries > 0) * 2 +
Mike Snitzera58a9352012-07-27 15:08:04 +01001440 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1441 m->retain_attached_hw_handler);
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001442 if (m->queue_if_no_path)
1443 DMEMIT("queue_if_no_path ");
1444 if (m->pg_init_retries)
1445 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001446 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1447 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
Mike Snitzera58a9352012-07-27 15:08:04 +01001448 if (m->retain_attached_hw_handler)
1449 DMEMIT("retain_attached_hw_handler ");
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001450 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001452 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 DMEMIT("0 ");
1454 else
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001455 DMEMIT("1 %s ", m->hw_handler_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
1457 DMEMIT("%u ", m->nr_priority_groups);
1458
1459 if (m->next_pg)
1460 pg_num = m->next_pg->pg_num;
1461 else if (m->current_pg)
1462 pg_num = m->current_pg->pg_num;
1463 else
Mike Snitzera490a072011-03-24 13:54:33 +00001464 pg_num = (m->nr_priority_groups ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
1466 DMEMIT("%u ", pg_num);
1467
1468 switch (type) {
1469 case STATUSTYPE_INFO:
1470 list_for_each_entry(pg, &m->priority_groups, list) {
1471 if (pg->bypassed)
1472 state = 'D'; /* Disabled */
1473 else if (pg == m->current_pg)
1474 state = 'A'; /* Currently Active */
1475 else
1476 state = 'E'; /* Enabled */
1477
1478 DMEMIT("%c ", state);
1479
1480 if (pg->ps.type->status)
1481 sz += pg->ps.type->status(&pg->ps, NULL, type,
1482 result + sz,
1483 maxlen - sz);
1484 else
1485 DMEMIT("0 ");
1486
1487 DMEMIT("%u %u ", pg->nr_pgpaths,
1488 pg->ps.type->info_args);
1489
1490 list_for_each_entry(p, &pg->pgpaths, list) {
1491 DMEMIT("%s %s %u ", p->path.dev->name,
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001492 p->is_active ? "A" : "F",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 p->fail_count);
1494 if (pg->ps.type->status)
1495 sz += pg->ps.type->status(&pg->ps,
1496 &p->path, type, result + sz,
1497 maxlen - sz);
1498 }
1499 }
1500 break;
1501
1502 case STATUSTYPE_TABLE:
1503 list_for_each_entry(pg, &m->priority_groups, list) {
1504 DMEMIT("%s ", pg->ps.type->name);
1505
1506 if (pg->ps.type->status)
1507 sz += pg->ps.type->status(&pg->ps, NULL, type,
1508 result + sz,
1509 maxlen - sz);
1510 else
1511 DMEMIT("0 ");
1512
1513 DMEMIT("%u %u ", pg->nr_pgpaths,
1514 pg->ps.type->table_args);
1515
1516 list_for_each_entry(p, &pg->pgpaths, list) {
1517 DMEMIT("%s ", p->path.dev->name);
1518 if (pg->ps.type->status)
1519 sz += pg->ps.type->status(&pg->ps,
1520 &p->path, type, result + sz,
1521 maxlen - sz);
1522 }
1523 }
1524 break;
1525 }
1526
1527 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528}
1529
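/*
 * Messages arrive here from userspace via the target message ioctl.
 * Illustrative examples (the sector argument is typically 0 for multipath):
 *
 *   dmsetup message <mpath-device> 0 queue_if_no_path
 *   dmsetup message <mpath-device> 0 switch_group 2
 *   dmsetup message <mpath-device> 0 fail_path 8:32
 *
 * One-word messages toggle the queue_if_no_path behaviour; two-word
 * messages take a priority-group number or a path device as argument.
 */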
1530static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1531{
Mike Anderson6380f262009-12-10 23:52:21 +00001532 int r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 struct dm_dev *dev;
1534 struct multipath *m = (struct multipath *) ti->private;
1535 action_fn action;
1536
Mike Anderson6380f262009-12-10 23:52:21 +00001537 mutex_lock(&m->work_mutex);
1538
Kiyoshi Uedac2f3d242009-12-10 23:52:27 +00001539 if (dm_suspended(ti)) {
1540 r = -EBUSY;
1541 goto out;
1542 }
1543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 if (argc == 1) {
Mike Snitzer498f0102011-08-02 12:32:04 +01001545 if (!strcasecmp(argv[0], "queue_if_no_path")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001546 r = queue_if_no_path(m, 1, 0);
1547 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001548 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001549 r = queue_if_no_path(m, 0, 0);
1550 goto out;
1551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 }
1553
Mike Anderson6380f262009-12-10 23:52:21 +00001554 if (argc != 2) {
1555 DMWARN("Unrecognised multipath message received.");
1556 goto out;
1557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
Mike Snitzer498f0102011-08-02 12:32:04 +01001559 if (!strcasecmp(argv[0], "disable_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001560 r = bypass_pg_num(m, argv[1], 1);
1561 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001562 } else if (!strcasecmp(argv[0], "enable_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001563 r = bypass_pg_num(m, argv[1], 0);
1564 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001565 } else if (!strcasecmp(argv[0], "switch_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001566 r = switch_pg_num(m, argv[1]);
1567 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001568 } else if (!strcasecmp(argv[0], "reinstate_path"))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 action = reinstate_path;
Mike Snitzer498f0102011-08-02 12:32:04 +01001570 else if (!strcasecmp(argv[0], "fail_path"))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 action = fail_path;
Mike Anderson6380f262009-12-10 23:52:21 +00001572 else {
1573 DMWARN("Unrecognised multipath message received.");
1574 goto out;
1575 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
Nikanth Karthikesan8215d6e2010-03-06 02:32:27 +00001577 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 if (r) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001579 DMWARN("message: error getting device %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 argv[1]);
Mike Anderson6380f262009-12-10 23:52:21 +00001581 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 }
1583
1584 r = action_dev(m, dev, action);
1585
1586 dm_put_device(ti, dev);
1587
Mike Anderson6380f262009-12-10 23:52:21 +00001588out:
1589 mutex_unlock(&m->work_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591}
1592
Al Viro647b3d02007-08-28 22:15:59 -04001593static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
Milan Broz9af4aa32006-10-03 01:15:20 -07001594 unsigned long arg)
1595{
Mikulas Patocka35991652012-06-03 00:29:58 +01001596 struct multipath *m = ti->private;
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001597 struct pgpath *pgpath;
Mikulas Patocka35991652012-06-03 00:29:58 +01001598 struct block_device *bdev;
1599 fmode_t mode;
Milan Broz9af4aa32006-10-03 01:15:20 -07001600 unsigned long flags;
Mikulas Patocka35991652012-06-03 00:29:58 +01001601 int r;
1602
Mikulas Patocka35991652012-06-03 00:29:58 +01001603 bdev = NULL;
1604 mode = 0;
1605 r = 0;
Milan Broz9af4aa32006-10-03 01:15:20 -07001606
1607 spin_lock_irqsave(&m->lock, flags);
1608
1609 if (!m->current_pgpath)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +01001610 __choose_pgpath(m, 0);
Milan Broz9af4aa32006-10-03 01:15:20 -07001611
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001612 pgpath = m->current_pgpath;
1613
1614 if (pgpath) {
1615 bdev = pgpath->path.dev->bdev;
1616 mode = pgpath->path.dev->mode;
Milan Broze90dae12006-10-03 01:15:22 -07001617 }
Milan Broz9af4aa32006-10-03 01:15:20 -07001618
Mike Snitzer7ba10aa2012-09-26 23:45:41 +01001619 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
Hannes Reinecke6c182cd2013-07-10 23:41:15 +01001620 r = -ENOTCONN;
Milan Broz9af4aa32006-10-03 01:15:20 -07001621 else if (!bdev)
1622 r = -EIO;
1623
1624 spin_unlock_irqrestore(&m->lock, flags);
1625
Paolo Bonziniec8013b2012-01-12 16:01:29 +01001626 /*
1627	 * Only pass ioctls straight through if the device sizes match exactly; otherwise verify the ioctl is allowed first.
1628 */
1629 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1630 r = scsi_verify_blk_ioctl(NULL, cmd);
1631
Hannes Reinecke6c182cd2013-07-10 23:41:15 +01001632 if (r == -ENOTCONN && !fatal_signal_pending(current))
Mikulas Patocka35991652012-06-03 00:29:58 +01001633 queue_work(kmultipathd, &m->process_queued_ios);
Mikulas Patocka35991652012-06-03 00:29:58 +01001634
Al Viro633a08b2007-08-29 20:34:12 -04001635 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
Milan Broz9af4aa32006-10-03 01:15:20 -07001636}
1637
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001638static int multipath_iterate_devices(struct dm_target *ti,
1639 iterate_devices_callout_fn fn, void *data)
1640{
1641 struct multipath *m = ti->private;
1642 struct priority_group *pg;
1643 struct pgpath *p;
1644 int ret = 0;
1645
1646 list_for_each_entry(pg, &m->priority_groups, list) {
1647 list_for_each_entry(p, &pg->pgpaths, list) {
Mike Snitzer5dea2712009-07-23 20:30:42 +01001648 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001649 if (ret)
1650 goto out;
1651 }
1652 }
1653
1654out:
1655 return ret;
1656}
1657
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001658static int __pgpath_busy(struct pgpath *pgpath)
1659{
1660 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1661
1662 return dm_underlying_device_busy(q);
1663}
1664
1665/*
1666	 * We return "busy" only when we can map I/Os but the underlying
1667	 * devices are busy (so even if we mapped the I/Os now, they would
1668	 * wait on the underlying queue).
1669	 * In other words, if we want to error I/Os or queue them inside the
1670	 * target because no path is usable, we don't return "busy"; otherwise
1671	 * dm core won't hand us the I/Os and we can't do what we want.
1672 */
1673static int multipath_busy(struct dm_target *ti)
1674{
1675 int busy = 0, has_active = 0;
1676 struct multipath *m = ti->private;
1677 struct priority_group *pg;
1678 struct pgpath *pgpath;
1679 unsigned long flags;
1680
1681 spin_lock_irqsave(&m->lock, flags);
1682
Hannes Reineckeb63349a2013-10-01 11:49:56 +02001683 /* pg_init in progress, requeue until done */
1684 if (m->pg_init_in_progress) {
1685 busy = 1;
1686 goto out;
1687 }
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001688 /* Guess which priority_group will be used at next mapping time */
1689 if (unlikely(!m->current_pgpath && m->next_pg))
1690 pg = m->next_pg;
1691 else if (likely(m->current_pg))
1692 pg = m->current_pg;
1693 else
1694 /*
1695 * We don't know which pg will be used at next mapping time.
1696	 * We don't call __choose_pgpath() here to avoid triggering
1697 * pg_init just by busy checking.
1698 * So we don't know whether underlying devices we will be using
1699 * at next mapping time are busy or not. Just try mapping.
1700 */
1701 goto out;
1702
1703 /*
1704	 * If there is at least one non-busy active path, the path selector
1705 * will be able to select it. So we consider such a pg as not busy.
1706 */
1707 busy = 1;
1708 list_for_each_entry(pgpath, &pg->pgpaths, list)
1709 if (pgpath->is_active) {
1710 has_active = 1;
1711
1712 if (!__pgpath_busy(pgpath)) {
1713 busy = 0;
1714 break;
1715 }
1716 }
1717
1718 if (!has_active)
1719 /*
1720 * No active path in this pg, so this pg won't be used and
1721 * the current_pg will be changed at next mapping time.
1722 * We need to try mapping to determine it.
1723 */
1724 busy = 0;
1725
1726out:
1727 spin_unlock_irqrestore(&m->lock, flags);
1728
1729 return busy;
1730}
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732/*-----------------------------------------------------------------
1733 * Module setup
1734 *---------------------------------------------------------------*/
1735static struct target_type multipath_target = {
1736 .name = "multipath",
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +00001737 .version = {1, 6, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 .module = THIS_MODULE,
1739 .ctr = multipath_ctr,
1740 .dtr = multipath_dtr,
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001741 .map_rq = multipath_map,
1742 .rq_end_io = multipath_end_io,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 .presuspend = multipath_presuspend,
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001744 .postsuspend = multipath_postsuspend,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 .resume = multipath_resume,
1746 .status = multipath_status,
1747 .message = multipath_message,
Milan Broz9af4aa32006-10-03 01:15:20 -07001748 .ioctl = multipath_ioctl,
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001749 .iterate_devices = multipath_iterate_devices,
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001750 .busy = multipath_busy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751};
1752
1753static int __init dm_multipath_init(void)
1754{
1755 int r;
1756
1757	/* allocate a slab cache for the per-request dm_mpath_io structs */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001758 _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 if (!_mpio_cache)
1760 return -ENOMEM;
1761
1762 r = dm_register_target(&multipath_target);
1763 if (r < 0) {
Alasdair G Kergon0cd33122007-07-12 17:27:01 +01001764 DMERR("register failed %d", r);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 kmem_cache_destroy(_mpio_cache);
1766 return -EINVAL;
1767 }
1768
Tejun Heo4d4d66a2011-01-13 19:59:57 +00001769 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001770 if (!kmultipathd) {
Alasdair G Kergon0cd33122007-07-12 17:27:01 +01001771 DMERR("failed to create workqueue kmpathd");
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001772 dm_unregister_target(&multipath_target);
1773 kmem_cache_destroy(_mpio_cache);
1774 return -ENOMEM;
1775 }
1776
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001777 /*
1778	 * A separate workqueue is used to run the device handlers, so that
1779	 * the existing workqueue (kmultipathd) is not overloaded.
1780	 * Overloading that workqueue would also create a bottleneck in the
1781	 * activation path of the storage hardware.
1782 */
Tejun Heo4d4d66a2011-01-13 19:59:57 +00001783 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1784 WQ_MEM_RECLAIM);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001785 if (!kmpath_handlerd) {
1786 DMERR("failed to create workqueue kmpath_handlerd");
1787 destroy_workqueue(kmultipathd);
1788 dm_unregister_target(&multipath_target);
1789 kmem_cache_destroy(_mpio_cache);
1790 return -ENOMEM;
1791 }
1792
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001793 DMINFO("version %u.%u.%u loaded",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 multipath_target.version[0], multipath_target.version[1],
1795 multipath_target.version[2]);
1796
1797 return r;
1798}
1799
1800static void __exit dm_multipath_exit(void)
1801{
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001802 destroy_workqueue(kmpath_handlerd);
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001803 destroy_workqueue(kmultipathd);
1804
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00001805 dm_unregister_target(&multipath_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 kmem_cache_destroy(_mpio_cache);
1807}
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809module_init(dm_multipath_init);
1810module_exit(dm_multipath_exit);
1811
1812MODULE_DESCRIPTION(DM_NAME " multipath target");
1813MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1814MODULE_LICENSE("GPL");