/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static int __pgpath_busy(struct pgpath *pgpath);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->mpio_pool = NULL;
		if (!use_blk_mq) {
			unsigned min_ios = dm_get_reserved_rq_based_ios();

			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
			if (!m->mpio_pool) {
				kfree(m);
				return NULL;
			}
		}

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	if (!m->mpio_pool) {
		/* Use blk-mq pdu memory requested via per_io_data_size */
		mpio = get_mpio(info);
		memset(mpio, 0, sizeof(*mpio));
		return mpio;
	}

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return NULL;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
	/* Only needed for non blk-mq (.request_fn) multipath */
	if (m->mpio_pool) {
		struct dm_mpath_io *mpio = info->ptr;

		info->ptr = NULL;
		mempool_free(mpio, m->mpio_pool);
	}
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (m->pg_init_in_progress || m->pg_init_disabled)
		return 0;

	m->pg_init_count++;
	m->pg_init_required = 0;

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
	return m->pg_init_in_progress;
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths) {
		m->queue_io = 0;
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path ||
		(m->queue_if_no_path != m->saved_queue_if_no_path &&
		 dm_noflush_suspending(m->ti)));
}

/*
 * Map cloned requests
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	spin_lock_irq(&m->lock);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath || !m->queue_io)
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (!pgpath) {
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
		goto out_unlock;
	} else if (m->queue_io || m->pg_init_required) {
		__pg_init_all_paths(m);
		goto out_unlock;
	}

	mpio = set_mpio(m, map_context);
	if (!mpio)
		/* ENOMEM, requeue */
		goto out_unlock;

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	spin_unlock_irq(&m->lock);

	if (clone) {
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/*
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		 */
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;

out_unlock:
	spin_unlock_irq(&m->lock);

	return r;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
	blk_mq_free_request(clone);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
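/*
 * Illustrative example (an assumption added for clarity, not taken from
 * this source): the target-parameter portion of a table line with one
 * feature arg, no hardware handler, and a single priority group of two
 * paths using the "round-robin" selector with one per-path selector arg
 * each (hypothetical devices 8:16 and 8:32) might look like:
 *
 *     1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 */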
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti, use_blk_mq);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (use_blk_mq)
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue)
		dm_table_run_md_queue_async(m->ti->table);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
		goto out;

	if (m->pg_init_required) {
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
			goto out;
	}
	m->queue_io = 0;

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_request_fn_mpio(m, map_context);

	return r;
}

1343/*
1344 * Suspend can't complete until all the I/O is processed so if
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001345 * the last path fails we must error any remaining I/O.
1346 * Note that if the freeze_bdev fails while suspending, the
1347 * queue_if_no_path state is lost - userspace should reset it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 */
1349static void multipath_presuspend(struct dm_target *ti)
1350{
Mike Snitzer7943bd62016-02-02 21:53:15 -05001351 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
Alasdair G Kergon485ef692005-09-27 21:45:45 -07001353 queue_if_no_path(m, 0, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354}
1355
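/*
 * Make sure all outstanding path-activation and event work has finished
 * before the suspend completes.
 */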
static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      m->retain_attached_hw_handler);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->retain_attached_hw_handler)
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

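/*
 * Messages handled here:
 *   queue_if_no_path | fail_if_no_path
 *   disable_group <#> | enable_group <#> | switch_group <#>
 *   reinstate_path <dev> | fail_path <dev>
 */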
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

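/*
 * Select the block device that ioctls should be passed through to.
 * -ENOTCONN is returned while pg_init has not completed, or while no
 * path is usable but queue_if_no_path is set.
 */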
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	unsigned long flags;
	int r;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		if (!m->queue_io) {
			*bdev = m->current_pgpath->path.dev->bdev;
			*mode = m->current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (m->queue_if_no_path)
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	if (r == -ENOTCONN) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		}
		if (m->pg_init_required)
			__pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

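/* Invoke the callout on every path device in every priority group. */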
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

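/* Ask the block layer whether the path's underlying device queue is busy. */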
static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* pg_init in progress or no paths available */
	if (m->pg_init_in_progress ||
	    (!m->nr_valid_paths && m->queue_if_no_path)) {
		busy = 1;
		goto out;
	}
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether the underlying devices we will be
		 * using at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

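/*
 * Module init: create the slab for per-request state, register the
 * target and set up the multipath workqueues.
 */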
static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");