/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
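
/*
 * Illustrative userspace usage (not part of this driver): an md
 * multipath array is typically assembled with mdadm; the device names
 * below are placeholders, both expected to be different paths to the
 * same physical storage:
 *
 *	mdadm --create /dev/md0 --level=multipath --raid-devices=2 \
 *	      /dev/sdb1 /dev/sdc1
 */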

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/raid/multipath.h>
#include <linux/buffer_head.h>
#include <asm/atomic.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

#define MAX_WORK_PER_DISK 128

/* minimum number of multipath_bh buffers guaranteed by the mempool */
#define NR_RESERVED_BUFS 32


static mdk_personality_t multipath_personality;

static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
	struct multipath_bh *mpb;
	mpb = kmalloc(sizeof(*mpb), gfp_flags);
	if (mpb)
		memset(mpb, 0, sizeof(*mpb));
	return mpb;
}

static void mp_pool_free(void *mpb, void *data)
{
	kfree(mpb);
}

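/*
 * Pick the path for an IO: scan multipaths[] under rcu_read_lock() and
 * pin the chosen device by bumping ->nr_pending before dropping the
 * lock, so a concurrent hot-remove cannot free the rdev while the IO
 * is in flight; rdev_dec_pending() drops the reference on completion.
 */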
static int multipath_map (multipath_conf_t *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we could do read balancing on the read side;
	 * for now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && rdev->in_sync) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return (-1);
}

static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	mddev_t *mddev = mp_bh->mddev;
	multipath_conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}

/*
 * multipath_end_bh_io() is called when we have finished servicing a
 * multipathed operation and are ready to return a success/failure code
 * to the buffer cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);

	bio_endio(bio, bio->bi_size, err);
	mempool_free(mp_bh, conf->pool);
}

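/*
 * Completion handler contract in this kernel generation: bi_end_io may
 * be called for partial completions.  As long as bio->bi_size is still
 * non-zero the bio is not finished and we return 1 to say "keep going";
 * only a final (bi_size == 0) completion ends the master bio or, on a
 * read error, reschedules it onto another path.
 */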
static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
				 int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

	if (bio->bi_size)
		return 1;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!bio_rw_ahead(bio)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev,b),
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
	return 0;
}

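/*
 * Queue plugging: the block layer may hold back ("plug") queued IO to
 * allow merging.  When md is asked to unplug the array's queue, the
 * request must be propagated to every working member device that still
 * has IO pending, which is what unplug_slaves() does.
 */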
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void multipath_unplug(request_queue_t *q)
{
	unplug_slaves(q->queuedata);
}

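/*
 * The request path does not clone the incoming bio; it is copied by
 * value into mp_bh->bio and redirected at the chosen path's device.
 * BIO_RW_FAILFAST asks the lower driver to fail quickly instead of
 * retrying, so a dead path is detected fast and the IO can be
 * rerouted via the retry list.
 */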
static int multipath_make_request (request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	if (bio_data_dir(bio)==WRITE) {
		disk_stat_inc(mddev->gendisk, writes);
		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(mddev->gendisk, reads);
		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
	}

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, bio->bi_size, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}

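/*
 * /proc/mdstat status fragment.  For a healthy two-path array this
 * prints something like " [2/2] [UU]"; a failed path shows up as "_",
 * e.g. " [2/1] [U_]".
 */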
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			    conf->multipaths[i].rdev &&
			    conf->multipaths[i].rdev->in_sync ? "U" : "_");
	seq_printf (seq, "]");
}

static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);

	if (conf->working_disks <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT
		       "multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
	} else {
		/*
		 * Mark disk as unusable
		 */
		if (!rdev->faulty) {
			char b[BDEVNAME_SIZE];
			rdev->in_sync = 0;
			rdev->faulty = 1;
			mddev->sb_dirty = 1;
			conf->working_disks--;
			printk(KERN_ALERT "multipath: IO failure on %s,"
			       " disabling IO path.\n"
			       "	Operation continuing on %d IO paths.\n",
			       bdevname (rdev->bdev,b),
			       conf->working_disks);
		}
	}
}

static void print_multipath_conf (multipath_conf_t *conf)
{
	int i;
	struct multipath_info *tmp;

	printk("MULTIPATH conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
	       conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			printk(" disk%d, o:%d, dev:%s\n",
			       i,!tmp->rdev->faulty,
			       bdevname(tmp->rdev->bdev,b));
	}
}

static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev->private;
	int found = 0;
	int path;
	struct multipath_info *p;

	print_multipath_conf(conf);

	for (path=0; path<mddev->raid_disks; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);

			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sectors to one PAGE, as
			 * a one page request is never in violation.
			 * (Note: it is very unlikely that a device with
			 * merge_bvec_fn will be involved in multipath.)
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			conf->working_disks++;
			rdev->raid_disk = path;
			rdev->in_sync = 1;
			p->rdev = rdev;
			found = 1;
			/* an rdev can occupy only one slot; without this
			 * break it would be duplicated into every empty
			 * slot */
			break;
		}

	print_multipath_conf(conf);
	return found;
}

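/*
 * Hot-removal race protocol: clear p->rdev first, wait for every RCU
 * reader that might still see the old pointer to finish
 * (synchronize_kernel() below), and only then recheck ->nr_pending.
 * If IO slipped in meanwhile, restore the pointer and report -EBUSY
 * rather than freeing a device that is still in use.
 */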
static int multipath_remove_disk(mddev_t *mddev, int number)
{
	multipath_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	rdev = p->rdev;
	if (rdev) {
		if (rdev->in_sync ||
		    atomic_read(&rdev->nr_pending)) {
			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
			       " but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_kernel();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_multipath_conf(conf);
	return err;
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void multipathd (mddev_t *mddev)
{
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_sector = mp_bh->master_bio->bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
			       " error for block %llu\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_sector);
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
			       " to another IO path\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= (1 << BIO_RW_FAILFAST);
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static int multipath_run (mddev_t *mddev)
{
	multipath_conf_t *conf;
	int disk_idx;
	struct multipath_info *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->level != LEVEL_MULTIPATH) {
		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_stop()]
	 */

	conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out;
	}
	memset(conf, 0, sizeof(*conf));

	conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);

	mddev->queue->unplug_fn = multipath_unplug;

	mddev->queue->issue_flush_fn = multipath_issue_flush;

	conf->working_disks = 0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, not that we ever expect a device with
		 * a merge_bvec_fn to be involved in multipath */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!rdev->faulty)
			conf->working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	mddev->sb_dirty = 1;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!conf->working_disks) {
		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - conf->working_disks;

	conf->pool = mempool_create(NR_RESERVED_BUFS,
				    mp_pool_alloc, mp_pool_free,
				    NULL);
	if (conf->pool == NULL) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath");
	if (!mddev->thread) {
		printk(KERN_ERR "multipath: couldn't allocate thread"
		       " for %s\n", mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "multipath: array %s active with %d out of %d IO paths\n",
	       mdname(mddev), conf->working_disks, mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->array_size = mddev->size;
	return 0;

out_free_conf:
	if (conf->pool)
		mempool_destroy(conf->pool);
	if (conf->multipaths)
		kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int multipath_stop (mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static mdk_personality_t multipath_personality =
{
	.name		= "multipath",
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.stop		= multipath_stop,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
};

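/*
 * MULTIPATH is md personality number 7; the MODULE_ALIAS at the bottom
 * of the file lets the md core demand-load this module by that number
 * when an array of this level is assembled.
 */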
static int __init multipath_init (void)
{
	return register_md_personality (MULTIPATH, &multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (MULTIPATH);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */