/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
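
/*
 * DM_PG_INIT_DELAY_DEFAULT is a sentinel: while pg_init_delay_msecs
 * still holds it, __pg_init_all_paths() falls back to
 * DM_PG_INIT_DELAY_MSECS when delaying a pg_init retry.
 */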

/* Path properties */
struct pgpath {
        struct list_head list;

        struct priority_group *pg;      /* Owning PG */
        unsigned fail_count;            /* Cumulative failure count */

        struct dm_path path;
        struct delayed_work activate_path;

        bool is_active:1;               /* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
        struct list_head list;

        struct multipath *m;            /* Owning multipath instance */
        struct path_selector ps;

        unsigned pg_num;                /* Reference number */
        unsigned nr_pgpaths;            /* Number of paths in PG */
        struct list_head pgpaths;

        bool bypassed:1;                /* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
        struct list_head list;
        struct dm_target *ti;

        const char *hw_handler_name;
        char *hw_handler_params;

        spinlock_t lock;

        unsigned nr_priority_groups;
        struct list_head priority_groups;

        wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */

        struct pgpath *current_pgpath;
        struct priority_group *current_pg;
        struct priority_group *next_pg; /* Switch to this PG if set */

        unsigned long flags;            /* Multipath state flags */

        unsigned pg_init_retries;       /* Number of times to retry pg_init */
        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */

        atomic_t nr_valid_paths;        /* Total number of usable paths */
        atomic_t pg_init_in_progress;   /* Only one pg_init allowed at once */
        atomic_t pg_init_count;         /* Number of times pg_init called */

        struct work_struct trigger_event;

        /*
         * We must use a mempool of dm_mpath_io structs so that we
         * can resubmit bios on error.
         */
        mempool_t *mpio_pool;

        struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
        struct pgpath *pgpath;
        size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0                       /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1               /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2         /* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3     /* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4               /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5               /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6            /* Delay pg_init retry? */
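
/*
 * The MPATHF_* values are bit numbers, not masks: they are used with
 * set_bit(), clear_bit() and test_bit() on the m->flags word, e.g.
 * set_bit(MPATHF_QUEUE_IO, &m->flags), so individual flags can be
 * updated atomically.
 */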

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (pgpath) {
                pgpath->is_active = true;
                INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
        }

        return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
        kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
        struct priority_group *pg;

        pg = kzalloc(sizeof(*pg), GFP_KERNEL);

        if (pg)
                INIT_LIST_HEAD(&pg->pgpaths);

        return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
        struct pgpath *pgpath, *tmp;

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                dm_put_device(ti, pgpath->path.dev);
                free_pgpath(pgpath);
        }
}

static void free_priority_group(struct priority_group *pg,
                                struct dm_target *ti)
{
        struct path_selector *ps = &pg->ps;

        if (ps->type) {
                ps->type->destroy(ps);
                dm_put_path_selector(ps->type);
        }

        free_pgpaths(&pg->pgpaths, ti);
        kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
        struct multipath *m;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
                INIT_LIST_HEAD(&m->priority_groups);
                spin_lock_init(&m->lock);
                set_bit(MPATHF_QUEUE_IO, &m->flags);
                atomic_set(&m->nr_valid_paths, 0);
                atomic_set(&m->pg_init_in_progress, 0);
                atomic_set(&m->pg_init_count, 0);
                m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);

                m->mpio_pool = NULL;
                if (!use_blk_mq) {
                        unsigned min_ios = dm_get_reserved_rq_based_ios();

                        m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
                        if (!m->mpio_pool) {
                                kfree(m);
                                return NULL;
                        }
                }

                m->ti = ti;
                ti->private = m;
        }

        return m;
}
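
/*
 * With blk-mq, the per-request struct dm_mpath_io lives in the request
 * PDU (sized via ti->per_io_data_size in multipath_ctr()), so no
 * mempool is created; the old .request_fn path instead allocates each
 * dm_mpath_io from mpio_pool.
 */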

static void free_multipath(struct multipath *m)
{
        struct priority_group *pg, *tmp;

        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }

        kfree(m->hw_handler_name);
        kfree(m->hw_handler_params);
        mempool_destroy(m->mpio_pool);
        kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
        return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
        struct dm_mpath_io *mpio;

        if (!m->mpio_pool) {
                /* Use blk-mq pdu memory requested via per_io_data_size */
                mpio = get_mpio(info);
                memset(mpio, 0, sizeof(*mpio));
                return mpio;
        }

        mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
        if (!mpio)
                return NULL;

        memset(mpio, 0, sizeof(*mpio));
        info->ptr = mpio;

        return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
        /* Only needed for non blk-mq (.request_fn) multipath */
        if (m->mpio_pool) {
                struct dm_mpath_io *mpio = info->ptr;

                info->ptr = NULL;
                mempool_free(mpio, m->mpio_pool);
        }
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

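/*
 * Kick off pg_init for every active path in the current PG, honouring
 * any configured retry delay.  Returns the number of path activations
 * now in flight; 0 means pg_init is disabled, already running, or
 * there is no current PG to initialise.
 */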
static int __pg_init_all_paths(struct multipath *m)
{
        struct pgpath *pgpath;
        unsigned long pg_init_delay = 0;

        if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
                return 0;

        atomic_inc(&m->pg_init_count);
        clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

        /* Check here to reset pg_init_required */
        if (!m->current_pg)
                return 0;

        if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
                pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
                                                 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
        list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
                /* Skip failed paths */
                if (!pgpath->is_active)
                        continue;
                if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
                                       pg_init_delay))
                        atomic_inc(&m->pg_init_in_progress);
        }
        return atomic_read(&m->pg_init_in_progress);
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
        m->current_pg = pgpath->pg;

        /* Must we initialise the PG first, and queue I/O till it's ready? */
        if (m->hw_handler_name) {
                set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                set_bit(MPATHF_QUEUE_IO, &m->flags);
        } else {
                clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
        }

        atomic_set(&m->pg_init_count, 0);
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
                               size_t nr_bytes)
{
        struct dm_path *path;

        path = pg->ps.type->select_path(&pg->ps, nr_bytes);
        if (!path)
                return -ENXIO;

        m->current_pgpath = path_to_pgpath(path);

        if (m->current_pg != pg)
                __switch_pg(m, m->current_pgpath);

        return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
        struct priority_group *pg;
        bool bypassed = true;

        if (!atomic_read(&m->nr_valid_paths)) {
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
                goto failed;
        }

        /* Were we instructed to switch PG? */
        if (m->next_pg) {
                pg = m->next_pg;
                m->next_pg = NULL;
                if (!__choose_path_in_pg(m, pg, nr_bytes))
                        return;
        }

        /* Don't change PG until it has no remaining paths */
        if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
                return;

        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
         * Second time we only try the ones we skipped, but set
         * pg_init_delay_retry so we do not hammer controllers.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
                        if (!__choose_path_in_pg(m, pg, nr_bytes)) {
                                if (!bypassed)
                                        set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
                                return;
                        }
                }
        } while (bypassed--);

failed:
        m->current_pgpath = NULL;
        m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
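/*
 * For example, during a noflush suspend multipath_presuspend() saves
 * queue_if_no_path and clears it; the two bits then differ, so requests
 * are pushed back to the dm core rather than errored while the table is
 * being swapped.
 */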
static int __must_push_back(struct multipath *m)
{
        return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
                ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
                  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
                 dm_noflush_suspending(m->ti)));
}

/*
 * Map cloned requests
 */
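/*
 * Exactly one of 'clone' and 'rq' is non-NULL: the old .request_fn
 * interface passes in a pre-allocated clone, while the blk-mq interface
 * passes the original request and expects *__clone to be allocated
 * here.
 */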
static int __multipath_map(struct dm_target *ti, struct request *clone,
                           union map_info *map_context,
                           struct request *rq, struct request **__clone)
{
        struct multipath *m = ti->private;
        int r = DM_MAPIO_REQUEUE;
        size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
        struct pgpath *pgpath;
        struct block_device *bdev;
        struct dm_mpath_io *mpio;

        spin_lock_irq(&m->lock);

        /* Do we need to select a new pgpath? */
        if (!m->current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                __choose_pgpath(m, nr_bytes);

        pgpath = m->current_pgpath;

        if (!pgpath) {
                if (!__must_push_back(m))
                        r = -EIO;       /* Failed */
                goto out_unlock;
        } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
                   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                __pg_init_all_paths(m);
                goto out_unlock;
        }

        mpio = set_mpio(m, map_context);
        if (!mpio)
                /* ENOMEM, requeue */
                goto out_unlock;

        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;

        bdev = pgpath->path.dev->bdev;

        spin_unlock_irq(&m->lock);

        if (clone) {
                /*
                 * Old request-based interface: allocated clone is passed in.
                 * Used by: .request_fn stacked on .request_fn path(s).
                 */
                clone->q = bdev_get_queue(bdev);
                clone->rq_disk = bdev->bd_disk;
                clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        } else {
                /*
                 * blk-mq request-based interface; used by both:
                 * .request_fn stacked on blk-mq path(s) and
                 * blk-mq stacked on blk-mq path(s).
                 */
                *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
                                                rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(*__clone)) {
                        /* ENOMEM, requeue */
                        clear_request_fn_mpio(m, map_context);
                        return r;
                }
                (*__clone)->bio = (*__clone)->biotail = NULL;
                (*__clone)->rq_disk = bdev->bd_disk;
                (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        }

        if (pgpath->pg->ps.type->start_io)
                pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
                                              &pgpath->path,
                                              nr_bytes);
        return DM_MAPIO_REMAPPED;

out_unlock:
        spin_unlock_irq(&m->lock);

        return r;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
                         union map_info *map_context)
{
        return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                                   union map_info *map_context,
                                   struct request **clone)
{
        return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
        blk_mq_free_request(clone);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
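/*
 * save_old_value is used around suspend/resume: the current setting is
 * preserved in MPATHF_SAVED_QUEUE_IF_NO_PATH so that it can be restored
 * afterwards, whereas an explicit change overwrites the saved state as
 * well.
 */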
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
                            bool save_old_value)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (save_old_value) {
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
                else
                        clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
        } else {
                if (queue_if_no_path)
                        set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
                else
                        clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
        }
        if (queue_if_no_path)
                set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
        else
                clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);

        spin_unlock_irqrestore(&m->lock, flags);

        if (!queue_if_no_path)
                dm_table_run_md_queue_async(m->ti->table);

        return 0;
}
539
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540/*
541 * An event is triggered whenever a path is taken out of use.
542 * Includes path failure and PG bypass.
543 */
David Howellsc4028952006-11-22 14:57:56 +0000544static void trigger_event(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545{
David Howellsc4028952006-11-22 14:57:56 +0000546 struct multipath *m =
547 container_of(work, struct multipath, trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548
549 dm_table_event(m->ti->table);
550}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
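/*
 * For example, a (hypothetical) two-path round-robin table with no
 * feature or hardware handler args, where 1000 is round-robin's
 * per-path repeat count:
 *
 *   0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */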
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
                               struct dm_target *ti)
{
        int r;
        struct path_selector_type *pst;
        unsigned ps_argc;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of path selector args"},
        };

        pst = dm_get_path_selector(dm_shift_arg(as));
        if (!pst) {
                ti->error = "unknown path selector type";
                return -EINVAL;
        }

        r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
        if (r) {
                dm_put_path_selector(pst);
                return -EINVAL;
        }

        r = pst->create(&pg->ps, ps_argc, as->argv);
        if (r) {
                dm_put_path_selector(pst);
                ti->error = "path selector constructor failed";
                return r;
        }

        pg->ps.type = pst;
        dm_consume_args(as, ps_argc);

        return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
                                 struct dm_target *ti)
{
        int r;
        struct pgpath *p;
        struct multipath *m = ti->private;
        struct request_queue *q = NULL;
        const char *attached_handler_name;

        /* we need at least a path arg */
        if (as->argc < 1) {
                ti->error = "no device given";
                return ERR_PTR(-EINVAL);
        }

        p = alloc_pgpath();
        if (!p)
                return ERR_PTR(-ENOMEM);

        r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
                          &p->path.dev);
        if (r) {
                ti->error = "error getting device";
                goto bad;
        }

        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
                q = bdev_get_queue(p->path.dev->bdev);

        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
                attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
                if (attached_handler_name) {
                        /*
                         * Reset hw_handler_name to match the attached handler
                         * and clear any hw_handler_params associated with the
                         * ignored handler.
                         *
                         * NB. This modifies the table line to show the actual
                         * handler instead of the original table passed in.
                         */
                        kfree(m->hw_handler_name);
                        m->hw_handler_name = attached_handler_name;

                        kfree(m->hw_handler_params);
                        m->hw_handler_params = NULL;
                }
        }

        if (m->hw_handler_name) {
                r = scsi_dh_attach(q, m->hw_handler_name);
                if (r == -EBUSY) {
                        char b[BDEVNAME_SIZE];

                        printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
                               bdevname(p->path.dev->bdev, b));
                        goto retain;
                }
                if (r < 0) {
                        ti->error = "error attaching hardware handler";
                        dm_put_device(ti, p->path.dev);
                        goto bad;
                }

                if (m->hw_handler_params) {
                        r = scsi_dh_set_params(q, m->hw_handler_params);
                        if (r < 0) {
                                ti->error = "unable to set hardware handler parameters";
                                dm_put_device(ti, p->path.dev);
                                goto bad;
                        }
                }
        }

        r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
        if (r) {
                dm_put_device(ti, p->path.dev);
                goto bad;
        }

        return p;

 bad:
        free_pgpath(p);
        return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
                                                   struct multipath *m)
{
        static struct dm_arg _args[] = {
                {1, 1024, "invalid number of paths"},
                {0, 1024, "invalid number of selector args"}
        };

        int r;
        unsigned i, nr_selector_args, nr_args;
        struct priority_group *pg;
        struct dm_target *ti = m->ti;

        if (as->argc < 2) {
                as->argc = 0;
                ti->error = "not enough priority group arguments";
                return ERR_PTR(-EINVAL);
        }

        pg = alloc_priority_group();
        if (!pg) {
                ti->error = "couldn't allocate priority group";
                return ERR_PTR(-ENOMEM);
        }
        pg->m = m;

        r = parse_path_selector(as, pg, ti);
        if (r)
                goto bad;

        /*
         * read the paths
         */
        r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
        if (r)
                goto bad;

        nr_args = 1 + nr_selector_args;
        for (i = 0; i < pg->nr_pgpaths; i++) {
                struct pgpath *pgpath;
                struct dm_arg_set path_args;

                if (as->argc < nr_args) {
                        ti->error = "not enough path parameters";
                        r = -EINVAL;
                        goto bad;
                }

                path_args.argc = nr_args;
                path_args.argv = as->argv;

                pgpath = parse_path(&path_args, &pg->ps, ti);
                if (IS_ERR(pgpath)) {
                        r = PTR_ERR(pgpath);
                        goto bad;
                }

                pgpath->pg = pg;
                list_add_tail(&pgpath->list, &pg->pgpaths);
                dm_consume_args(as, nr_args);
        }

        return pg;

 bad:
        free_priority_group(pg, ti);
        return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
        unsigned hw_argc;
        int ret;
        struct dm_target *ti = m->ti;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of hardware handler args"},
        };

        if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
                return -EINVAL;

        if (!hw_argc)
                return 0;

        m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

        if (hw_argc > 1) {
                char *p;
                int i, j, len = 4;

                for (i = 0; i <= hw_argc - 2; i++)
                        len += strlen(as->argv[i]) + 1;
                p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
                if (!p) {
                        ti->error = "memory allocation failed";
                        ret = -ENOMEM;
                        goto fail;
                }
                j = sprintf(p, "%d", hw_argc - 1);
                for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
                        j = sprintf(p, "%s", as->argv[i]);
        }
        dm_consume_args(as, hw_argc - 1);

        return 0;
fail:
        kfree(m->hw_handler_name);
        m->hw_handler_name = NULL;
        return ret;
}
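
/*
 * Note: hw_handler_params is built above as NUL-separated strings (an
 * argument count followed by the arguments themselves); parse_path()
 * later hands this buffer to scsi_dh_set_params().
 */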
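
/*
 * For example, the feature string "3 queue_if_no_path pg_init_retries 5"
 * (a hypothetical table fragment; the leading 3 counts the words that
 * follow) enables queueing when no path is usable and caps pg_init at
 * five retries.
 */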
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
        int r;
        unsigned argc;
        struct dm_target *ti = m->ti;
        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, 6, "invalid number of feature args"},
                {1, 50, "pg_init_retries must be between 1 and 50"},
                {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
        };

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!argc)
                return 0;

        do {
                arg_name = dm_shift_arg(as);
                argc--;

                if (!strcasecmp(arg_name, "queue_if_no_path")) {
                        r = queue_if_no_path(m, true, false);
                        continue;
                }

                if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
                        set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_retries") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
                        argc--;
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
                        argc--;
                        continue;
                }

                ti->error = "Unrecognised multipath feature request";
                r = -EINVAL;
        } while (argc && !r);

        return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                         char **argv)
{
        /* target arguments */
        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of priority groups"},
                {0, 1024, "invalid initial priority group number"},
        };

        int r;
        struct multipath *m;
        struct dm_arg_set as;
        unsigned pg_count = 0;
        unsigned next_pg_num;
        bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

        as.argc = argc;
        as.argv = argv;

        m = alloc_multipath(ti, use_blk_mq);
        if (!m) {
                ti->error = "can't allocate multipath";
                return -EINVAL;
        }

        r = parse_features(&as, m);
        if (r)
                goto bad;

        r = parse_hw_handler(&as, m);
        if (r)
                goto bad;

        r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
        if (r)
                goto bad;

        if ((!m->nr_priority_groups && next_pg_num) ||
            (m->nr_priority_groups && !next_pg_num)) {
                ti->error = "invalid initial priority group";
                r = -EINVAL;
                goto bad;
        }

        /* parse the priority groups */
        while (as.argc) {
                struct priority_group *pg;
                unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

                pg = parse_priority_group(&as, m);
                if (IS_ERR(pg)) {
                        r = PTR_ERR(pg);
                        goto bad;
                }

                nr_valid_paths += pg->nr_pgpaths;
                atomic_set(&m->nr_valid_paths, nr_valid_paths);

                list_add_tail(&pg->list, &m->priority_groups);
                pg_count++;
                pg->pg_num = pg_count;
                if (!--next_pg_num)
                        m->next_pg = pg;
        }

        if (pg_count != m->nr_priority_groups) {
                ti->error = "priority group count mismatch";
                r = -EINVAL;
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
        if (use_blk_mq)
                ti->per_io_data_size = sizeof(struct dm_mpath_io);

        return 0;

 bad:
        free_multipath(m);
        return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&m->pg_init_wait, &wait);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&m->pg_init_in_progress))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
        set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
        smp_mb__after_atomic();

        flush_workqueue(kmpath_handlerd);
        multipath_wait_for_pg_init_completion(m);
        flush_workqueue(kmultipathd);
        flush_work(&m->trigger_event);

        clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
        smp_mb__after_atomic();
}
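
/*
 * Setting MPATHF_PG_INIT_DISABLED above prevents pg_init_limit_reached()
 * and __pg_init_all_paths() from scheduling new activation work while
 * the workqueues are drained; the flag is cleared again once the flush
 * is complete.
 */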

static void multipath_dtr(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        flush_multipath_work(m);
        free_multipath(m);
}
986/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 * Take a path out of use.
988 */
989static int fail_path(struct pgpath *pgpath)
990{
991 unsigned long flags;
992 struct multipath *m = pgpath->pg->m;
993
994 spin_lock_irqsave(&m->lock, flags);
995
Kiyoshi Ueda66800732008-10-10 13:36:58 +0100996 if (!pgpath->is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 goto out;
998
Alasdair G Kergon72d94862006-06-26 00:27:35 -0700999 DMWARN("Failing path %s.", pgpath->path.dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000
1001 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
Mike Snitzerbe7d31c2016-02-10 13:02:21 -05001002 pgpath->is_active = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 pgpath->fail_count++;
1004
Mike Snitzer91e968a2016-03-17 17:10:15 -04001005 atomic_dec(&m->nr_valid_paths);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006
1007 if (pgpath == m->current_pgpath)
1008 m->current_pgpath = NULL;
1009
Mike Andersonb15546f2007-10-19 22:48:02 +01001010 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
Mike Snitzer91e968a2016-03-17 17:10:15 -04001011 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
Mike Andersonb15546f2007-10-19 22:48:02 +01001012
Alasdair G Kergonfe9cf302009-01-06 03:05:13 +00001013 schedule_work(&m->trigger_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014
1015out:
1016 spin_unlock_irqrestore(&m->lock, flags);
1017
1018 return 0;
1019}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
        int r = 0, run_queue = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;
        unsigned nr_valid_paths;

        spin_lock_irqsave(&m->lock, flags);

        if (pgpath->is_active)
                goto out;

        DMWARN("Reinstating path %s.", pgpath->path.dev->name);

        r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
        if (r)
                goto out;

        pgpath->is_active = true;

        nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
        if (nr_valid_paths == 1) {
                m->current_pgpath = NULL;
                run_queue = 1;
        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                        atomic_inc(&m->pg_init_in_progress);
        }

        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
                       pgpath->path.dev->name, nr_valid_paths);

        schedule_work(&m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);
        if (run_queue)
                dm_table_run_md_queue_async(m->ti->table);

        return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
                      action_fn action)
{
        int r = -EINVAL;
        struct pgpath *pgpath;
        struct priority_group *pg;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(pgpath, &pg->pgpaths, list) {
                        if (pgpath->path.dev == dev)
                                r = action(pgpath);
                }
        }

        return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
                      bool bypassed)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        pg->bypassed = bypassed;
        m->current_pgpath = NULL;
        m->current_pg = NULL;

        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
        struct priority_group *pg;
        unsigned pgnum;
        unsigned long flags;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to switch_pg_num");
                return -EINVAL;
        }

        spin_lock_irqsave(&m->lock, flags);
        list_for_each_entry(pg, &m->priority_groups, list) {
                pg->bypassed = false;
                if (--pgnum)
                        continue;

                m->current_pgpath = NULL;
                m->current_pg = NULL;
                m->next_pg = pg;
        }
        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
        return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
        struct priority_group *pg;
        unsigned pgnum;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to bypass_pg");
                return -EINVAL;
        }

        list_for_each_entry(pg, &m->priority_groups, list) {
                if (!--pgnum)
                        break;
        }

        bypass_pg(m, pg, bypassed);
        return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
        unsigned long flags;
        bool limit_reached = false;

        spin_lock_irqsave(&m->lock, flags);

        if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
            !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
                set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
        else
                limit_reached = true;

        spin_unlock_irqrestore(&m->lock, flags);

        return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
        struct pgpath *pgpath = data;
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;
        bool delay_retry = false;

        /* device or driver problems */
        switch (errors) {
        case SCSI_DH_OK:
                break;
        case SCSI_DH_NOSYS:
                if (!m->hw_handler_name) {
                        errors = 0;
                        break;
                }
                DMERR("Could not failover the device: Handler scsi_dh_%s "
                      "Error %d.", m->hw_handler_name, errors);
                /*
                 * Fail path for now, so we do not ping pong
                 */
                fail_path(pgpath);
                break;
        case SCSI_DH_DEV_TEMP_BUSY:
                /*
                 * Probably doing something like FW upgrade on the
                 * controller so try the other pg.
                 */
                bypass_pg(m, pg, true);
                break;
        case SCSI_DH_RETRY:
                /* Wait before retrying. */
                delay_retry = true;
                /* fall through */
        case SCSI_DH_IMM_RETRY:
        case SCSI_DH_RES_TEMP_UNAVAIL:
                if (pg_init_limit_reached(m, pgpath))
                        fail_path(pgpath);
                errors = 0;
                break;
        case SCSI_DH_DEV_OFFLINED:
        default:
                /*
                 * We probably do not want to fail the path for a device
                 * error, but this is what the old dm did. In future
                 * patches we can do more advanced handling.
                 */
                fail_path(pgpath);
        }

        spin_lock_irqsave(&m->lock, flags);
        if (errors) {
                if (pgpath == m->current_pgpath) {
                        DMERR("Could not failover device. Error %d.", errors);
                        m->current_pgpath = NULL;
                        m->current_pg = NULL;
                }
        } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
                pg->bypassed = false;

        if (atomic_dec_return(&m->pg_init_in_progress) > 0)
                /* Activations of other paths are still on going */
                goto out;

        if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                if (delay_retry)
                        set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
                else
                        clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

                if (__pg_init_all_paths(m))
                        goto out;
        }
        clear_bit(MPATHF_QUEUE_IO, &m->flags);

        /*
         * Wake up any thread waiting to suspend.
         */
        wake_up(&m->pg_init_wait);

out:
        spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);

        if (pgpath->is_active)
                scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
                                 pg_init_done, pgpath);
        else
                pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
1278
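/*
 * Errors that signal a problem with the request itself rather than
 * with the path that carried it; retrying these on another path
 * would not help.
 */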
static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!atomic_read(&m->nr_valid_paths)) {
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_request_fn_mpio(m, map_context);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	smp_mb__after_atomic();
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *             [A|D|E num_ps_status_args [ps_status_args]*
 *              num_paths num_selector_args
 *              [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
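/*
 * As an illustration only (the exact fields depend on the configured
 * path selector, so this is not byte-exact): a map with one enabled
 * priority group, a single active path on 8:16 and a selector that
 * emits no status arguments might produce, for STATUSTYPE_INFO:
 *
 *     2 0 0 0 1 1 A 0 1 0 8:16 A 0
 *
 * i.e. two feature args (the queue_io flag and pg_init_count), no
 * hardware handler status, one group with group 1 active, and path
 * 8:16 active with a fail_count of 0.
 */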
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

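/*
 * Message handler.  Accepts the single-argument messages
 * "queue_if_no_path" and "fail_if_no_path", and the two-argument
 * messages "disable_group <pg#>", "enable_group <pg#>",
 * "switch_group <pg#>", "reinstate_path <dev>" and "fail_path <dev>",
 * typically issued with something like (illustrative usage):
 *
 *     dmsetup message <map-name> 0 fail_path 8:32
 */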
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

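/*
 * Resolve the block device that ioctls should be forwarded to,
 * retrying path selection (and pg_init, if required) when no usable
 * path is currently cached.  A positive return tells dm core that the
 * target's length differs from that of the underlying device, so the
 * ioctl must not simply be passed through.
 */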
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	unsigned long flags;
	int r;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = m->current_pgpath->path.dev->bdev;
			*mode = m->current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	if (r == -ENOTCONN) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			__pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* pg_init in progress or no paths available */
	if (atomic_read(&m->pg_init_in_progress) ||
	    (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		busy = true;
		goto out;
	}
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether the underlying devices we will be
		 * using at next mapping time are busy or not.  Just try mapping.
		 */
		goto out;

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it.  So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_mpath_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue.  Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");