/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

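/*
 * Worked example for the zone layout built above (the device sizes are
 * purely illustrative, not anything this driver requires): three members
 * whose chunk-rounded sizes are 100, 200 and 300 units (the real fields
 * are in 512-byte sectors).  Three distinct sizes means three zones:
 *
 *   zone 0: dev_start =   0, 3 devices, zone_end = 100 * 3           = 300
 *   zone 1: dev_start = 100, 2 devices, zone_end = 300 + (200-100)*2 = 500
 *   zone 2: dev_start = 200, 1 device,  zone_end = 500 + (300-200)*1 = 600
 *
 * zone_end accumulates across zones, so find_zone() below can resolve an
 * array offset with a linear scan: offset 450 lies in zone 1
 * (300 <= 450 < 500) and becomes offset 150 within that zone.
 */
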
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

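/*
 * Worked example for map_sector() (hypothetical geometry, not dictated by
 * this file): chunk_sects = 8, a zone with nb_dev = 2, and a zone-relative
 * sector of 53 as produced by find_zone().
 *
 *   sect_in_chunk  = 53 & 7       = 5   (offset inside the chunk)
 *   chunk-in-zone  = 53 >> 3      = 6   (sixth chunk of the zone)
 *   chunk          = 53 / (2 * 8) = 3   (third stripe, i.e. chunk on the disk)
 *   device index   = 6 % 2        = 0   (first disk of the zone)
 *   *sector_offset = 3 * 8 + 5    = 29  (offset on that disk, within the zone)
 *
 * The caller then adds zone->dev_start and rdev->data_offset to obtain the
 * final on-disk sector.
 */
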
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

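/*
 * Example of the size calculation above (illustrative numbers): with
 * chunk_sectors = 128 and members of 1000, 2000 and 1500 sectors, each
 * member contributes its size rounded down to a chunk multiple, so the
 * array is 896 + 1920 + 1408 = 4224 sectors.
 */
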
static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * read ahead at least twice a whole stripe, i.e. the number
		 * of devices multiplied by the chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunk size should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

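/*
 * Example of the read-ahead sizing in raid0_run() (assumed values, not
 * requirements): 4 disks, chunk_sectors = 128 (64KiB chunks) and a 4KiB
 * PAGE_SIZE give stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is
 * raised to at least 128 pages (512KiB), i.e. two full stripes.
 */
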
static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the io distributed over 1 or more chunks ?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets into the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkcg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}

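/*
 * Worked example for raid0_handle_discard() (hypothetical geometry): a
 * zone with nb_dev = 2 and chunk_sectors = 64, so stripe_size = 128, and
 * a discard covering zone-relative sectors [100, 300).
 *
 *   first_stripe_index = 100 / 128 = 0,  last_stripe_index = 300 / 128 = 2
 *   start_disk_index = 1, start_disk_offset = 36
 *   end_disk_index   = 0, end_disk_offset   = 172
 *
 *   disk 0: after the start (disk < start_disk_index), so dev_start = 64;
 *           at the end      (disk == end_disk_index),  so dev_end   = 172
 *   disk 1: at the start    (disk == start_disk_index),so dev_start = 36;
 *           past the end    (disk > end_disk_index),   so dev_end   = 128
 *
 * The two per-device discards cover 108 + 92 = 200 sectors, exactly the
 * length of the original request.
 */
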
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	zone = find_zone(mddev->private, &sector);
	tmp_dev = map_sector(mddev, zone, sector, &sector);
	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}

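/*
 * Example of the chunk-boundary split above (assumed values): with
 * chunk_sects = 64, a bio starting at sector 100 may carry at most
 * 64 - (100 & 63) = 28 sectors before crossing into the next chunk.
 * A 40-sector bio is therefore split: the first 28 sectors are remapped
 * and issued here, and the 12-sector remainder is resubmitted through
 * generic_make_request() and handled on a later pass.
 */
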
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

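/*
 * Example of the chunk-size search above (hypothetical array size,
 * 512-byte sectors, 4KiB pages): for an array of 1953525168 sectors the
 * loop halves chunksect from 128 until it divides the array size, ending
 * at 16 sectors (8KiB).  Since 8KiB >= PAGE_SIZE the takeover proceeds;
 * had the search ended below 8 sectors (4KiB), the conversion would be
 * rejected with -EINVAL instead.
 */
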
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");