/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static int __pgpath_busy(struct pgpath *pgpath);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->mpio_pool = NULL;
		if (!use_blk_mq) {
			unsigned min_ios = dm_get_reserved_rq_based_ios();

			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
			if (!m->mpio_pool) {
				kfree(m);
				return NULL;
			}
		}

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

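/*
 * Per-request state: either carved out of the blk-mq pdu (sized via
 * per_io_data_size in multipath_ctr()) or, for the old .request_fn
 * interface, allocated from mpio_pool in set_mpio() below.
 */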
static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	if (!m->mpio_pool) {
		/* Use blk-mq pdu memory requested via per_io_data_size */
		mpio = get_mpio(info);
		memset(mpio, 0, sizeof(*mpio));
		return mpio;
	}

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return NULL;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
	/* Only needed for non blk-mq (.request_fn) multipath */
	if (m->mpio_pool) {
		struct dm_mpath_io *mpio = info->ptr;

		info->ptr = NULL;
		mempool_free(mpio, m->mpio_pool);
	}
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

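/*
 * Caller must hold m->lock.  Returns the number of pg_init activations
 * now in flight (0 if nothing was queued, e.g. because pg_init is
 * disabled or already in progress).
 */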
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (m->pg_init_in_progress || m->pg_init_disabled)
		return 0;

	m->pg_init_count++;
	m->pg_init_required = 0;

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
	return m->pg_init_in_progress;
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

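/*
 * Ask the PG's path selector for a path; called with m->lock held.
 * If the chosen path lives in a different PG, __switch_pg() flags
 * whether a pg_init is required before I/O can flow.
 */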
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;
	unsigned repeat_count;

	path = pg->ps.type->select_path(&pg->ps, &repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths) {
		m->queue_io = 0;
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path ||
		(m->queue_if_no_path != m->saved_queue_if_no_path &&
		 dm_noflush_suspending(m->ti)));
}

/*
 * Map cloned requests
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	spin_lock_irq(&m->lock);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath || !m->queue_io)
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (!pgpath) {
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
		goto out_unlock;
	} else if (m->queue_io || m->pg_init_required) {
		__pg_init_all_paths(m);
		goto out_unlock;
	}

	mpio = set_mpio(m, map_context);
	if (!mpio)
		/* ENOMEM, requeue */
		goto out_unlock;

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	spin_unlock_irq(&m->lock);

	if (clone) {
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/*
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		 */
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;

out_unlock:
	spin_unlock_irq(&m->lock);

	return r;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

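/*
 * Only used on the clone_and_map (blk-mq) side: frees a clone that
 * __multipath_map() obtained via blk_mq_alloc_request().
 */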
static void multipath_release_clone(struct request *clone)
{
	blk_mq_free_request(clone);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

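	/*
	 * Pack any extra handler arguments into hw_handler_params as a
	 * count followed by NUL-separated strings ("2\0arg1\0arg2");
	 * parse_path() later hands this blob to scsi_dh_set_params().
	 */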
	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti, use_blk_mq);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (use_blk_mq)
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

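/*
 * Block until every pg_init activation kicked off via kmpath_handlerd
 * has completed: pg_init_done() wakes m->pg_init_wait once
 * pg_init_in_progress drops to zero.
 */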
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

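/*
 * Quiesce all asynchronous work: temporarily forbid new pg_init
 * submissions, drain the handler and multipath workqueues plus any
 * pending trigger_event, then allow pg_init again.
 */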
static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue)
		dm_table_run_md_queue_async(m->ti->table);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

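/*
 * Completion callback for scsi_dh_activate(); runs once for each path
 * activation queued by __pg_init_all_paths() or reinstate_path().
 */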
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
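		/* fall through */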
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
		goto out;

	if (m->pg_init_required) {
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
			goto out;
	}
	m->queue_io = 0;

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

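/*
 * Errors that indicate a problem with the target device itself rather
 * than the transport, so retrying the request on another path cannot
 * succeed.
 */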
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001262static int noretry_error(int error)
1263{
1264 switch (error) {
1265 case -EOPNOTSUPP:
1266 case -EREMOTEIO:
1267 case -EILSEQ:
1268 case -ENODATA:
Jun'ichi Nomuracc9d3c32013-09-13 14:54:30 +09001269 case -ENOSPC:
Hannes Reinecke7e782af2013-07-01 15:16:26 +02001270 return 1;
1271 }
1272
1273 /* Anything else could be a path failure, so should be retried */
1274 return 0;
1275}
1276
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277/*
1278 * end_io handling
1279 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001280static int do_end_io(struct multipath *m, struct request *clone,
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001281 int error, struct dm_mpath_io *mpio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282{
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001283 /*
1284 * We don't queue any clone request inside the multipath target
1285 * during end I/O handling, since those clone requests don't have
1286 * bio clones. If we queue them inside the multipath target,
1287 * we need to make bio clones, that requires memory allocation.
1288 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1289 * don't have bio clones.)
1290 * Instead of queueing the clone request here, we queue the original
1291 * request into dm core, which will remake a clone request and
1292 * clone bios for it and resubmit it later.
1293 */
1294 int r = DM_ENDIO_REQUEUE;
Stefan Bader640eb3b2005-11-21 21:32:35 -08001295 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001297 if (!error && !clone->errors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 return 0; /* I/O complete */
1299
Mike Snitzer7eee4ae2014-06-02 15:50:06 -04001300 if (noretry_error(error))
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001301 return error;
1302
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001303 if (mpio->pgpath)
1304 fail_path(mpio->pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
Stefan Bader640eb3b2005-11-21 21:32:35 -08001306 spin_lock_irqsave(&m->lock, flags);
Hannes Reinecke751b2a72011-01-18 10:13:12 +01001307 if (!m->nr_valid_paths) {
1308 if (!m->queue_if_no_path) {
1309 if (!__must_push_back(m))
1310 r = -EIO;
1311 } else {
1312 if (error == -EBADE)
1313 r = error;
1314 }
1315 }
Stefan Bader640eb3b2005-11-21 21:32:35 -08001316 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001318 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319}
1320
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001321static int multipath_end_io(struct dm_target *ti, struct request *clone,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 int error, union map_info *map_context)
1323{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001324 struct multipath *m = ti->private;
Mike Snitzer2eff1922016-02-03 09:13:14 -05001325 struct dm_mpath_io *mpio = get_mpio(map_context);
Wei Yongjuna71a2612012-10-12 16:59:42 +01001326 struct pgpath *pgpath;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 struct path_selector *ps;
1328 int r;
1329
Jun'ichi Nomura466891f2012-03-28 18:41:25 +01001330 BUG_ON(!mpio);
1331
Mike Snitzer2eff1922016-02-03 09:13:14 -05001332 r = do_end_io(m, clone, error, mpio);
Wei Yongjuna71a2612012-10-12 16:59:42 +01001333 pgpath = mpio->pgpath;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 if (pgpath) {
1335 ps = &pgpath->pg->ps;
1336 if (ps->type->end_io)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +01001337 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
Mike Snitzer2eff1922016-02-03 09:13:14 -05001339 clear_request_fn_mpio(m, map_context);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341 return r;
1342}
1343
1344/*
 1345	 * Suspend can't complete until all the I/O is processed, so if
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001346 * the last path fails we must error any remaining I/O.
 1347	 * Note that if freeze_bdev fails while suspending, the
1348 * queue_if_no_path state is lost - userspace should reset it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 */
1350static void multipath_presuspend(struct dm_target *ti)
1351{
Mike Snitzer7943bd62016-02-02 21:53:15 -05001352 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
Alasdair G Kergon485ef692005-09-27 21:45:45 -07001354 queue_if_no_path(m, 0, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355}
1356
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001357static void multipath_postsuspend(struct dm_target *ti)
1358{
Mike Anderson6380f262009-12-10 23:52:21 +00001359 struct multipath *m = ti->private;
1360
1361 mutex_lock(&m->work_mutex);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001362 flush_multipath_work(m);
Mike Anderson6380f262009-12-10 23:52:21 +00001363 mutex_unlock(&m->work_mutex);
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001364}
1365
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001366/*
1367 * Restore the queue_if_no_path setting.
1368 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369static void multipath_resume(struct dm_target *ti)
1370{
Mike Snitzer7943bd62016-02-02 21:53:15 -05001371 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 unsigned long flags;
1373
1374 spin_lock_irqsave(&m->lock, flags);
Alasdair G Kergon436d4102005-07-12 15:53:03 -07001375 m->queue_if_no_path = m->saved_queue_if_no_path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 spin_unlock_irqrestore(&m->lock, flags);
1377}
1378
1379/*
1380 * Info output has the following format:
1381 * num_multipath_feature_args [multipath_feature_args]*
1382 * num_handler_status_args [handler_status_args]*
1383 * num_groups init_group_number
1384 * [A|D|E num_ps_status_args [ps_status_args]*
1385 * num_paths num_selector_args
1386 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1387 *
1388 * Table output has the following format (identical to the constructor string):
1389 * num_feature_args [features_args]*
1390 * num_handler_args hw_handler [hw_handler_args]*
1391 * num_groups init_group_number
 1392	 * [selector-name num_ps_args [ps_args]*
1393 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1394 */
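/*
 * Purely illustrative (not taken from a real device): for a hypothetical
 * map with two round-robin priority groups, the selector's default
 * repeat_count of 1000 and made-up paths 8:16, 8:32 and 8:48, the table
 * output could look like
 *
 *   1 queue_if_no_path 0 2 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *   round-robin 0 1 1 8:48 1000
 *
 * and the matching info output like
 *
 *   2 0 0 0 2 1 A 0 2 0 8:16 A 0 8:32 A 0 E 0 1 0 8:48 A 0
 */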
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001395static void multipath_status(struct dm_target *ti, status_type_t type,
1396 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397{
1398 int sz = 0;
1399 unsigned long flags;
Mike Snitzer7943bd62016-02-02 21:53:15 -05001400 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 struct priority_group *pg;
1402 struct pgpath *p;
1403 unsigned pg_num;
1404 char state;
1405
1406 spin_lock_irqsave(&m->lock, flags);
1407
1408 /* Features */
1409 if (type == STATUSTYPE_INFO)
Hannes Reineckee8099172014-02-28 15:33:44 +01001410 DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001411 else {
1412 DMEMIT("%u ", m->queue_if_no_path +
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001413 (m->pg_init_retries > 0) * 2 +
Mike Snitzera58a9352012-07-27 15:08:04 +01001414 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1415 m->retain_attached_hw_handler);
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001416 if (m->queue_if_no_path)
1417 DMEMIT("queue_if_no_path ");
1418 if (m->pg_init_retries)
1419 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001420 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1421 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
Mike Snitzera58a9352012-07-27 15:08:04 +01001422 if (m->retain_attached_hw_handler)
1423 DMEMIT("retain_attached_hw_handler ");
Dave Wysochanskic9e45582007-10-19 22:47:53 +01001424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001426 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 DMEMIT("0 ");
1428 else
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001429 DMEMIT("1 %s ", m->hw_handler_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
1431 DMEMIT("%u ", m->nr_priority_groups);
1432
1433 if (m->next_pg)
1434 pg_num = m->next_pg->pg_num;
1435 else if (m->current_pg)
1436 pg_num = m->current_pg->pg_num;
1437 else
Mike Snitzera490a072011-03-24 13:54:33 +00001438 pg_num = (m->nr_priority_groups ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
1440 DMEMIT("%u ", pg_num);
1441
1442 switch (type) {
1443 case STATUSTYPE_INFO:
1444 list_for_each_entry(pg, &m->priority_groups, list) {
1445 if (pg->bypassed)
1446 state = 'D'; /* Disabled */
1447 else if (pg == m->current_pg)
1448 state = 'A'; /* Currently Active */
1449 else
1450 state = 'E'; /* Enabled */
1451
1452 DMEMIT("%c ", state);
1453
1454 if (pg->ps.type->status)
1455 sz += pg->ps.type->status(&pg->ps, NULL, type,
1456 result + sz,
1457 maxlen - sz);
1458 else
1459 DMEMIT("0 ");
1460
1461 DMEMIT("%u %u ", pg->nr_pgpaths,
1462 pg->ps.type->info_args);
1463
1464 list_for_each_entry(p, &pg->pgpaths, list) {
1465 DMEMIT("%s %s %u ", p->path.dev->name,
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001466 p->is_active ? "A" : "F",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 p->fail_count);
1468 if (pg->ps.type->status)
1469 sz += pg->ps.type->status(&pg->ps,
1470 &p->path, type, result + sz,
1471 maxlen - sz);
1472 }
1473 }
1474 break;
1475
1476 case STATUSTYPE_TABLE:
1477 list_for_each_entry(pg, &m->priority_groups, list) {
1478 DMEMIT("%s ", pg->ps.type->name);
1479
1480 if (pg->ps.type->status)
1481 sz += pg->ps.type->status(&pg->ps, NULL, type,
1482 result + sz,
1483 maxlen - sz);
1484 else
1485 DMEMIT("0 ");
1486
1487 DMEMIT("%u %u ", pg->nr_pgpaths,
1488 pg->ps.type->table_args);
1489
1490 list_for_each_entry(p, &pg->pgpaths, list) {
1491 DMEMIT("%s ", p->path.dev->name);
1492 if (pg->ps.type->status)
1493 sz += pg->ps.type->status(&pg->ps,
1494 &p->path, type, result + sz,
1495 maxlen - sz);
1496 }
1497 }
1498 break;
1499 }
1500
1501 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502}
1503
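/*
 * Illustrative use only (hypothetical map name and path device): these
 * messages arrive via the standard dm message interface, e.g.
 *
 *   dmsetup message mpatha 0 "fail_path 8:32"
 *   dmsetup message mpatha 0 "switch_group 2"
 *   dmsetup message mpatha 0 queue_if_no_path
 */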
1504static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1505{
Mike Anderson6380f262009-12-10 23:52:21 +00001506 int r = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 struct dm_dev *dev;
Mike Snitzer7943bd62016-02-02 21:53:15 -05001508 struct multipath *m = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 action_fn action;
1510
Mike Anderson6380f262009-12-10 23:52:21 +00001511 mutex_lock(&m->work_mutex);
1512
Kiyoshi Uedac2f3d242009-12-10 23:52:27 +00001513 if (dm_suspended(ti)) {
1514 r = -EBUSY;
1515 goto out;
1516 }
1517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 if (argc == 1) {
Mike Snitzer498f0102011-08-02 12:32:04 +01001519 if (!strcasecmp(argv[0], "queue_if_no_path")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001520 r = queue_if_no_path(m, 1, 0);
1521 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001522 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001523 r = queue_if_no_path(m, 0, 0);
1524 goto out;
1525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 }
1527
Mike Anderson6380f262009-12-10 23:52:21 +00001528 if (argc != 2) {
Jose Castilloa356e422014-01-29 17:52:45 +01001529 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
Mike Anderson6380f262009-12-10 23:52:21 +00001530 goto out;
1531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
Mike Snitzer498f0102011-08-02 12:32:04 +01001533 if (!strcasecmp(argv[0], "disable_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001534 r = bypass_pg_num(m, argv[1], 1);
1535 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001536 } else if (!strcasecmp(argv[0], "enable_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001537 r = bypass_pg_num(m, argv[1], 0);
1538 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001539 } else if (!strcasecmp(argv[0], "switch_group")) {
Mike Anderson6380f262009-12-10 23:52:21 +00001540 r = switch_pg_num(m, argv[1]);
1541 goto out;
Mike Snitzer498f0102011-08-02 12:32:04 +01001542 } else if (!strcasecmp(argv[0], "reinstate_path"))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 action = reinstate_path;
Mike Snitzer498f0102011-08-02 12:32:04 +01001544 else if (!strcasecmp(argv[0], "fail_path"))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 action = fail_path;
Mike Anderson6380f262009-12-10 23:52:21 +00001546 else {
Jose Castilloa356e422014-01-29 17:52:45 +01001547 DMWARN("Unrecognised multipath message received: %s", argv[0]);
Mike Anderson6380f262009-12-10 23:52:21 +00001548 goto out;
1549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Nikanth Karthikesan8215d6e2010-03-06 02:32:27 +00001551 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 if (r) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001553 DMWARN("message: error getting device %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 argv[1]);
Mike Anderson6380f262009-12-10 23:52:21 +00001555 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 }
1557
1558 r = action_dev(m, dev, action);
1559
1560 dm_put_device(ti, dev);
1561
Mike Anderson6380f262009-12-10 23:52:21 +00001562out:
1563 mutex_unlock(&m->work_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565}
1566
Christoph Hellwige56f81e2015-10-15 14:10:50 +02001567static int multipath_prepare_ioctl(struct dm_target *ti,
1568 struct block_device **bdev, fmode_t *mode)
Milan Broz9af4aa32006-10-03 01:15:20 -07001569{
Mikulas Patocka35991652012-06-03 00:29:58 +01001570 struct multipath *m = ti->private;
Milan Broz9af4aa32006-10-03 01:15:20 -07001571 unsigned long flags;
Mikulas Patocka35991652012-06-03 00:29:58 +01001572 int r;
1573
Milan Broz9af4aa32006-10-03 01:15:20 -07001574 spin_lock_irqsave(&m->lock, flags);
1575
1576 if (!m->current_pgpath)
Kiyoshi Ueda02ab8232009-06-22 10:12:27 +01001577 __choose_pgpath(m, 0);
Milan Broz9af4aa32006-10-03 01:15:20 -07001578
Junichi Nomura43e43c92015-11-17 09:36:56 +00001579 if (m->current_pgpath) {
1580 if (!m->queue_io) {
1581 *bdev = m->current_pgpath->path.dev->bdev;
1582 *mode = m->current_pgpath->path.dev->mode;
1583 r = 0;
1584 } else {
1585 /* pg_init has not started or completed */
1586 r = -ENOTCONN;
1587 }
1588 } else {
1589 /* No path is available */
1590 if (m->queue_if_no_path)
1591 r = -ENOTCONN;
1592 else
1593 r = -EIO;
Milan Broze90dae12006-10-03 01:15:22 -07001594 }
Milan Broz9af4aa32006-10-03 01:15:20 -07001595
Milan Broz9af4aa32006-10-03 01:15:20 -07001596 spin_unlock_irqrestore(&m->lock, flags);
1597
Junichi Nomura5bbbfdf2015-11-17 09:39:26 +00001598 if (r == -ENOTCONN) {
Hannes Reinecke3e9f1be2014-02-28 15:33:45 +01001599 spin_lock_irqsave(&m->lock, flags);
1600 if (!m->current_pg) {
1601 /* Path status changed, redo selection */
1602 __choose_pgpath(m, 0);
1603 }
1604 if (m->pg_init_required)
1605 __pg_init_all_paths(m);
Mike Snitzer4cdd2ad2014-05-13 13:49:39 -04001606 spin_unlock_irqrestore(&m->lock, flags);
Hannes Reinecke63d832c2014-05-26 14:45:39 +02001607 dm_table_run_md_queue_async(m->ti->table);
Hannes Reinecke3e9f1be2014-02-28 15:33:45 +01001608 }
Mikulas Patocka35991652012-06-03 00:29:58 +01001609
Christoph Hellwige56f81e2015-10-15 14:10:50 +02001610 /*
1611 * Only pass ioctls through if the device sizes match exactly.
1612 */
1613 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1614 return 1;
1615 return r;
Milan Broz9af4aa32006-10-03 01:15:20 -07001616}
1617
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001618static int multipath_iterate_devices(struct dm_target *ti,
1619 iterate_devices_callout_fn fn, void *data)
1620{
1621 struct multipath *m = ti->private;
1622 struct priority_group *pg;
1623 struct pgpath *p;
1624 int ret = 0;
1625
1626 list_for_each_entry(pg, &m->priority_groups, list) {
1627 list_for_each_entry(p, &pg->pgpaths, list) {
Mike Snitzer5dea2712009-07-23 20:30:42 +01001628 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001629 if (ret)
1630 goto out;
1631 }
1632 }
1633
1634out:
1635 return ret;
1636}
1637
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001638static int __pgpath_busy(struct pgpath *pgpath)
1639{
1640 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1641
Mike Snitzer52b09912015-02-23 16:36:41 -05001642 return blk_lld_busy(q);
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001643}
1644
1645/*
 1646	 * We return "busy" only when we can map I/Os but the underlying devices
1647 * are busy (so even if we map I/Os now, the I/Os will wait on
1648 * the underlying queue).
 1649	 * In other words, if we want to fail I/Os or queue them inside the
 1650	 * target due to map unavailability, we don't return "busy"; otherwise
 1651	 * dm core won't hand us the I/Os and we can't do what we want.
1652 */
1653static int multipath_busy(struct dm_target *ti)
1654{
1655 int busy = 0, has_active = 0;
1656 struct multipath *m = ti->private;
1657 struct priority_group *pg;
1658 struct pgpath *pgpath;
1659 unsigned long flags;
1660
1661 spin_lock_irqsave(&m->lock, flags);
1662
Jun'ichi Nomura7a7a3b42014-07-08 00:55:14 +00001663 /* pg_init in progress or no paths available */
1664 if (m->pg_init_in_progress ||
1665 (!m->nr_valid_paths && m->queue_if_no_path)) {
Hannes Reineckeb63349a2013-10-01 11:49:56 +02001666 busy = 1;
1667 goto out;
1668 }
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001669 /* Guess which priority_group will be used at next mapping time */
1670 if (unlikely(!m->current_pgpath && m->next_pg))
1671 pg = m->next_pg;
1672 else if (likely(m->current_pg))
1673 pg = m->current_pg;
1674 else
1675 /*
1676 * We don't know which pg will be used at next mapping time.
 1677		 * We don't call __choose_pgpath() here to avoid triggering
 1678		 * pg_init just by busy checking.
 1679		 * So we don't know whether the underlying devices we will be
 1680		 * using at next mapping time are busy or not. Just try mapping.
1681 */
1682 goto out;
1683
1684 /*
 1685	 * If there is at least one non-busy active path, the path selector
 1686	 * will be able to select it, so we consider such a pg as not busy.
1687 */
1688 busy = 1;
1689 list_for_each_entry(pgpath, &pg->pgpaths, list)
1690 if (pgpath->is_active) {
1691 has_active = 1;
1692
1693 if (!__pgpath_busy(pgpath)) {
1694 busy = 0;
1695 break;
1696 }
1697 }
1698
1699 if (!has_active)
1700 /*
1701 * No active path in this pg, so this pg won't be used and
1702 * the current_pg will be changed at next mapping time.
1703 * We need to try mapping to determine it.
1704 */
1705 busy = 0;
1706
1707out:
1708 spin_unlock_irqrestore(&m->lock, flags);
1709
1710 return busy;
1711}
1712
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713/*-----------------------------------------------------------------
1714 * Module setup
1715 *---------------------------------------------------------------*/
1716static struct target_type multipath_target = {
1717 .name = "multipath",
Mike Snitzer16f12262016-01-31 17:22:27 -05001718 .version = {1, 11, 0},
1719 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 .module = THIS_MODULE,
1721 .ctr = multipath_ctr,
1722 .dtr = multipath_dtr,
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001723 .map_rq = multipath_map,
Mike Snitzere5863d92014-12-17 21:08:12 -05001724 .clone_and_map_rq = multipath_clone_and_map,
1725 .release_clone_rq = multipath_release_clone,
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001726 .rq_end_io = multipath_end_io,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 .presuspend = multipath_presuspend,
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001728 .postsuspend = multipath_postsuspend,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 .resume = multipath_resume,
1730 .status = multipath_status,
1731 .message = multipath_message,
Christoph Hellwige56f81e2015-10-15 14:10:50 +02001732 .prepare_ioctl = multipath_prepare_ioctl,
Mike Snitzeraf4874e2009-06-22 10:12:33 +01001733 .iterate_devices = multipath_iterate_devices,
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001734 .busy = multipath_busy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735};
1736
1737static int __init dm_multipath_init(void)
1738{
1739 int r;
1740
 1741	/* allocate a slab for the dm_mpath_io structs */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001742 _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 if (!_mpio_cache)
1744 return -ENOMEM;
1745
1746 r = dm_register_target(&multipath_target);
1747 if (r < 0) {
Alasdair G Kergon0cd33122007-07-12 17:27:01 +01001748 DMERR("register failed %d", r);
Johannes Thumshirnff658e92015-01-11 12:45:23 +01001749 r = -EINVAL;
1750 goto bad_register_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 }
1752
Tejun Heo4d4d66a2011-01-13 19:59:57 +00001753 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001754 if (!kmultipathd) {
Alasdair G Kergon0cd33122007-07-12 17:27:01 +01001755 DMERR("failed to create workqueue kmpathd");
Johannes Thumshirnff658e92015-01-11 12:45:23 +01001756 r = -ENOMEM;
1757 goto bad_alloc_kmultipathd;
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001758 }
1759
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001760 /*
1761 * A separate workqueue is used to handle the device handlers
 1762	 * to avoid overloading the existing workqueue. Overloading the
 1763	 * old workqueue would also create a bottleneck in the
 1764	 * storage hardware device activation path.
1765 */
Tejun Heo4d4d66a2011-01-13 19:59:57 +00001766 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1767 WQ_MEM_RECLAIM);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001768 if (!kmpath_handlerd) {
1769 DMERR("failed to create workqueue kmpath_handlerd");
Johannes Thumshirnff658e92015-01-11 12:45:23 +01001770 r = -ENOMEM;
1771 goto bad_alloc_kmpath_handlerd;
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001772 }
1773
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001774 DMINFO("version %u.%u.%u loaded",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 multipath_target.version[0], multipath_target.version[1],
1776 multipath_target.version[2]);
1777
Johannes Thumshirnff658e92015-01-11 12:45:23 +01001778 return 0;
1779
1780bad_alloc_kmpath_handlerd:
1781 destroy_workqueue(kmultipathd);
1782bad_alloc_kmultipathd:
1783 dm_unregister_target(&multipath_target);
1784bad_register_target:
1785 kmem_cache_destroy(_mpio_cache);
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 return r;
1788}
1789
1790static void __exit dm_multipath_exit(void)
1791{
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001792 destroy_workqueue(kmpath_handlerd);
Alasdair G Kergonc5573082005-05-05 16:16:07 -07001793 destroy_workqueue(kmultipathd);
1794
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00001795 dm_unregister_target(&multipath_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 kmem_cache_destroy(_mpio_cache);
1797}
1798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799module_init(dm_multipath_init);
1800module_exit(dm_multipath_exit);
1801
1802MODULE_DESCRIPTION(DM_NAME " multipath target");
1803MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1804MODULE_LICENSE("GPL");