/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
	struct work_struct trigger_event;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;

	/*
	 * FIXME: make request-based code _not_ include this member.
	 */
	struct dm_bio_details bio_details;
};

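/*
 * Callback type applied per matching path by action_dev(); fail_path()
 * and reinstate_path() below have this signature.
 */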
typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
#define MPATHF_BIO_BASED 7			/* Device is bio-based? */

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

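/*
 * Allocate the per-target multipath context.  A private mempool of
 * dm_mpath_io structs is only needed for the old .request_fn interface;
 * blk-mq supplies per-request pdu memory via per_io_data_size and
 * bio-based multipath uses per-bio data instead.
 */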
static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq,
					 bool bio_based)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		atomic_set(&m->nr_valid_paths, 0);
		atomic_set(&m->pg_init_in_progress, 0);
		atomic_set(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->mpio_pool = NULL;
		if (!use_blk_mq && !bio_based) {
			unsigned min_ios = dm_get_reserved_rq_based_ios();

			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
			if (!m->mpio_pool) {
				kfree(m);
				return NULL;
			}
		}

		if (bio_based) {
			INIT_WORK(&m->process_queued_bios, process_queued_bios);
			set_bit(MPATHF_BIO_BASED, &m->flags);
			/*
			 * bio-based doesn't support any direct scsi_dh management;
			 * it just discovers if a scsi_dh is attached.
			 */
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
		}

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

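/*
 * Attach a zeroed dm_mpath_io to the request: taken from the blk-mq
 * per-request pdu when no mempool exists, otherwise allocated from the
 * .request_fn mempool (and freed again by clear_request_fn_mpio()).
 */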
static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	if (!m->mpio_pool) {
		/* Use blk-mq pdu memory requested via per_io_data_size */
		mpio = get_mpio(info);
		memset(mpio, 0, sizeof(*mpio));
		return mpio;
	}

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return NULL;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
	/* Only needed for non blk-mq (.request_fn) multipath */
	if (m->mpio_pool) {
		struct dm_mpath_io *mpio = info->ptr;

		info->ptr = NULL;
		mempool_free(mpio, m->mpio_pool);
	}
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, sizeof(struct dm_mpath_io));
}

static struct dm_mpath_io *set_mpio_bio(struct multipath *m, struct bio *bio)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

	memset(mpio, 0, sizeof(*mpio));
	dm_bio_record(&mpio->bio_details, bio);

	return mpio;
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

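/*
 * Schedule pg_init activation work on every active path in the current
 * PG.  Called with m->lock held; returns the number of activation work
 * items now in flight (0 if pg_init is disabled or already running).
 */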
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	r = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

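/*
 * Ask the path selector of @pg for the next path to use.  Only takes
 * m->lock if this selection changes the current PG.
 */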
static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

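/*
 * Select a usable path: honour an explicit next_pg request first, then
 * try the current PG, then scan the remaining PGs (bypassed ones last).
 */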
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	bool bypassed = true;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (lockless_dereference(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = lockless_dereference(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static bool __must_push_back(struct multipath *m)
{
	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
		dm_noflush_suspending(m->ti));
}

static bool must_push_back_rq(struct multipath *m)
{
	return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
		__must_push_back(m));
}

static bool must_push_back_bio(struct multipath *m)
{
	return __must_push_back(m);
}

/*
 * Map cloned requests (request-based multipath)
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (!must_push_back_rq(m))
			r = -EIO;	/* Failed */
		return r;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		pg_init_all_paths(m);
		return r;
	}

	mpio = set_mpio(m, map_context);
	if (!mpio)
		/* ENOMEM, requeue */
		return r;

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	if (clone) {
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/*
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		 */
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
	blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
	size_t nr_bytes = bio->bi_iter.bi_size;
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);
		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else if (!queue_io)
			queue_work(kmultipathd, &m->process_queued_bios);
		return DM_MAPIO_SUBMITTED;
	}

	if (!pgpath) {
		if (!must_push_back_bio(m))
			return -EIO;
		return DM_MAPIO_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bio->bi_error = 0;
	bio->bi_bdev = pgpath->path.dev->bdev;
	bio->bi_rw |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = set_mpio_bio(m, bio);

	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_bios_list(struct multipath *m)
{
	if (test_bit(MPATHF_BIO_BASED, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

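/*
 * Work function: drain m->queued_bios under a block plug, remapping
 * each bio or completing it with an error if no path can be found.
 */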
static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		if (r < 0 || r == DM_MAPIO_REQUEUE) {
			bio->bi_error = r;
			bio_endio(bio);
		} else if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
		else
			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	} else {
		if (queue_if_no_path)
			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
		else
			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	if (queue_if_no_path)
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);

	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_bios_list(m);
	}

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
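/*
 * For example, the target argument string for one round-robin PG with
 * two paths and no feature or hw_handler args could look like
 * (illustrative only; device numbers are placeholders):
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. 0 features, 0 hw_handler args, 1 PG, initial PG 1, selector
 * "round-robin" with 0 selector args, then 2 paths each carrying 1
 * per-path selector arg (the round-robin repeat count).
 */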
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (test_bit(MPATHF_BIO_BASED, &m->flags)) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
			   char **argv, bool bio_based)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti, use_blk_mq, bio_based);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (use_blk_mq || bio_based)
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	return __multipath_ctr(ti, argc, argv, false);
}

static int multipath_bio_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	return __multipath_ctr(ti, argc, argv, true);
}

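/*
 * Block (uninterruptibly) until all outstanding pg_init work has
 * completed; woken via m->pg_init_wait.
 */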
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

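/*
 * Quiesce all outstanding work: temporarily disable pg_init, drain the
 * handler and multipath workqueues, then re-enable pg_init.
 */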
1155static void flush_multipath_work(struct multipath *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156{
Mike Snitzer518257b2016-03-17 16:32:10 -04001157 set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1158 smp_mb__after_atomic();
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +00001159
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001160 flush_workqueue(kmpath_handlerd);
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001161 multipath_wait_for_pg_init_completion(m);
Alasdair G Kergona044d012005-07-12 15:53:02 -07001162 flush_workqueue(kmultipathd);
Tejun Heo43829732012-08-20 14:51:24 -07001163 flush_work(&m->trigger_event);
Shiva Krishna Merla954a73d2013-10-30 03:26:38 +00001164
Mike Snitzer518257b2016-03-17 16:32:10 -04001165 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1166 smp_mb__after_atomic();
Kiyoshi Ueda6df400a2009-12-10 23:52:19 +00001167}
1168
1169static void multipath_dtr(struct dm_target *ti)
1170{
1171 struct multipath *m = ti->private;
1172
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001173 flush_multipath_work(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 free_multipath(m);
1175}
1176
1177/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 * Take a path out of use.
1179 */
1180static int fail_path(struct pgpath *pgpath)
1181{
1182 unsigned long flags;
1183 struct multipath *m = pgpath->pg->m;
1184
1185 spin_lock_irqsave(&m->lock, flags);
1186
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001187 if (!pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 goto out;
1189
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001190 DMWARN("Failing path %s.", pgpath->path.dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
1192 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
Mike Snitzerbe7d31c2016-02-10 13:02:21 -05001193 pgpath->is_active = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 pgpath->fail_count++;
1195
Mike Snitzer91e968a2016-03-17 17:10:15 -04001196 atomic_dec(&m->nr_valid_paths);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
1198 if (pgpath == m->current_pgpath)
1199 m->current_pgpath = NULL;
1200
Mike Andersonb15546f2007-10-19 22:48:02 +01001201 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
Mike Snitzer91e968a2016-03-17 17:10:15 -04001202 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
Mike Andersonb15546f2007-10-19 22:48:02 +01001203
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001204 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
1206out:
1207 spin_unlock_irqrestore(&m->lock, flags);
1208
1209 return 0;
1210}
1211
1212/*
1213 * Reinstate a previously-failed path
1214 */
1215static int reinstate_path(struct pgpath *pgpath)
1216{
Hannes Reinecke63d832c2014-05-26 14:45:39 +02001217 int r = 0, run_queue = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 unsigned long flags;
1219 struct multipath *m = pgpath->pg->m;
Mike Snitzer91e968a2016-03-17 17:10:15 -04001220 unsigned nr_valid_paths;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
1222 spin_lock_irqsave(&m->lock, flags);
1223
Kiyoshi Ueda66800732008-10-10 13:36:58 +01001224 if (pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 goto out;
1226
Mike Snitzerec31f3f2016-02-20 12:49:43 -05001227 DMWARN("Reinstating path %s.", pgpath->path.dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
1229 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1230 if (r)
1231 goto out;
1232
Mike Snitzerbe7d31c2016-02-10 13:02:21 -05001233 pgpath->is_active = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
Mike Snitzer91e968a2016-03-17 17:10:15 -04001235 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1236 if (nr_valid_paths == 1) {
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001237 m->current_pgpath = NULL;
Hannes Reinecke63d832c2014-05-26 14:45:39 +02001238 run_queue = 1;
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001239 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001240 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
Mike Snitzer91e968a2016-03-17 17:10:15 -04001241 atomic_inc(&m->pg_init_in_progress);
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
Mike Andersonb15546f2007-10-19 22:48:02 +01001244 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
Mike Snitzer91e968a2016-03-17 17:10:15 -04001245 pgpath->path.dev->name, nr_valid_paths);
Mike Andersonb15546f2007-10-19 22:48:02 +01001246
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001247 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
1249out:
1250 spin_unlock_irqrestore(&m->lock, flags);
Mike Snitzer76e33fe2016-05-19 16:15:14 -04001251 if (run_queue) {
Hannes Reinecke63d832c2014-05-26 14:45:39 +02001252 dm_table_run_md_queue_async(m->ti->table);
Mike Snitzer76e33fe2016-05-19 16:15:14 -04001253 process_queued_bios_list(m);
1254 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
1256 return r;
1257}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

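/*
 * Completion callback for scsi_dh activation: interpret the handler's
 * result, retry or bypass the PG as needed, and kick off any work that
 * was deferred while pg_init was in progress.
 */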
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		/* fall through */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_bios_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

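/*
 * Worker: trigger the hardware handler's activation on an active path,
 * or report SCSI_DH_DEV_OFFLINED straight away if the path has failed.
 */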
static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

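/*
 * Errors in this list indicate a problem with the target rather than
 * the path, so retrying the I/O on another path would not help.
 */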
static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (!must_push_back_rq(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}

	return r;
}

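/*
 * Request-based end_io: let do_end_io() decide the disposition, then
 * give the path selector a chance to account for the completed I/O.
 */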
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_request_fn_mpio(m, map_context);

	return r;
}

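/*
 * Bio-based counterpart of do_end_io(): a failed bio is restored from
 * its saved details and queued for the daemon to resubmit.
 */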
static int do_end_io_bio(struct multipath *m, struct bio *clone,
			 int error, struct dm_mpath_io *mpio)
{
	unsigned long flags;

	if (!error)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (!must_push_back_bio(m))
				return -EIO;
			return DM_ENDIO_REQUEUE;
		} else {
			if (error == -EBADE)
				return error;
		}
	}

	/* Queue for the daemon to resubmit */
	dm_bio_restore(&mpio->bio_details, clone);

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
	spin_unlock_irqrestore(&m->lock, flags);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);

	return DM_ENDIO_INCOMPLETE;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io_bio(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

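/*
 * Flush all pending path-initialization and trigger-event work so that
 * nothing is left running once the device is suspended.
 */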
static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	smp_mb__after_atomic();
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 * [A|D|E num_ps_status_args [ps_status_args]*
 * num_paths num_selector_args
 * [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 * [priority selector-name num_ps_args [ps_args]*
 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
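/*
 * For illustration only (device names and counts are hypothetical), a
 * table line for two paths in a single round-robin group might read:
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 */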
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

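/*
 * Message handler.  Accepted messages (sent via "dmsetup message"):
 *   queue_if_no_path | fail_if_no_path
 *   disable_group <pg#> | enable_group <pg#> | switch_group <pg#>
 *   reinstate_path <path_dev> | fail_path <path_dev>
 *
 * E.g. (device and path names hypothetical):
 *   dmsetup message mpatha 0 fail_path 8:32
 */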
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

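/*
 * Pick the device that ioctls should be passed to: the current path if
 * one is usable, otherwise report why no path can take them yet.
 */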
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_bios_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

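/*
 * Call the supplied callback once for every underlying path device.
 */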
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

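/*
 * Ask the block layer whether the underlying device of a path is busy.
 */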
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress or no paths available */
	if (atomic_read(&m->pg_init_in_progress) ||
	    (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
		return true;

	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static struct target_type multipath_bio_target = {
	.name = "multipath-bio",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = multipath_bio_ctr,
	.dtr = multipath_dtr,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_mpath_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	r = dm_register_target(&multipath_bio_target);
	if (r < 0) {
		DMERR("bio-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_bio_based_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_bio_target);
bad_register_bio_based_target:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	dm_unregister_target(&multipath_bio_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");