/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

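/*
 * Report congestion: the array counts as congested if md itself is
 * congested or if the backing device of any member disk is congested
 * for the requested operations (bits).
 */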
static int raid0_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	raid0_conf_t *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			       bdevname(conf->devlist[j*raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}

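/*
 * Build the strip_zone array and the per-zone device lists.  Devices are
 * grouped by their chunk-rounded size, and each distinct size starts a
 * new zone: zone 0 spans all devices up to the size of the smallest one,
 * and every further zone covers the extra space on the devices that are
 * larger than the smallest device of the previous zone.
 */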
static int create_strip_zones(struct mddev *mddev, raid0_conf_t **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_segments to 1, lying within
		 * a single page.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}
		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
					+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
					+ bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

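/*
 * The exported array size is simply the sum of the member devices'
 * sizes (already rounded down to whole chunks by create_strip_zones).
 * raid0 does not support generic reshape, hence the WARN_ONCE.
 */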
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}

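/*
 * Set up and start the array: validate the chunk size, build the
 * strip-zone configuration (or reuse the one prepared by a takeover),
 * publish the array size and tune the queue limits and read-ahead.
 */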
static int raid0_run(struct mddev *mddev)
{
	raid0_conf_t *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	return md_integrity_register(mddev);
}

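/*
 * Tear down the array: drain in-flight queue activity that still
 * references 'conf', then free the strip-zone and device tables.
 */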
static int raid0_stop(struct mddev *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. We separate two flows: a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
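/*
 * Both flows do the same calculation: find which chunk the sector falls
 * in and the offset within that chunk, spread consecutive chunks
 * round-robin across the zone's devices, and rewrite *sector_offset to
 * the offset on the chosen device relative to the start of the zone;
 * the caller adds zone->dev_start and the rdev's data_offset.
 */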
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/*
 * Does the I/O fit entirely within a single chunk, or does it cross a
 * chunk boundary?  Returns non-zero when it fits.
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

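/*
 * Main I/O entry point: split a bio that would cross a chunk boundary,
 * map the bio onto the right zone and member device, and return 1 so
 * the block layer submits it to that device.
 */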
static int raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		if (raid0_make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

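/* Contribute the chunk size (in KiB) to this array's /proc/mdstat line */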
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

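/*
 * Take over a raid4/raid5 (PARITY_N layout) array with exactly one
 * failed disk, which must be the one in the last slot (the dedicated
 * parity disk); the remaining data disks already carry the striped data.
 */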
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	raid0_conf_t *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	raid0_conf_t *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	raid0_conf_t *priv_conf;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
	       mddev->level);

	return ERR_PTR(-EINVAL);
}

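/* raid0 keeps no internal state that needs quiescing, so this is a no-op */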
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct mdk_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");