/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

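/*
 * Allocator for the r1bio mempool.  Each r1bio carries a trailing bios[]
 * array with one slot per raid disk; if the allocation fails we unplug
 * the member devices to help memory become reclaimable.
 */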
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

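/*
 * Allocator for the resync buffer pool: one bio per raid disk plus the
 * data pages backing them.  For a user-requested check/repair each bio
 * gets its own pages so the copies can be compared; otherwise the pages
 * are shared by all bios.
 */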
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

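/*
 * Drop the bios attached to an r1bio.  IO_BLOCKED is only a marker,
 * not a real bio, so it must not be passed to bio_put().
 */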
static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

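/*
 * Queue an r1bio for raid1d to retry, and wake up anyone waiting on
 * the barrier so the retry is noticed promptly.
 */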
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

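/*
 * Completion handler for a normal read.  On success the master bio is
 * completed immediately; on failure the request is handed to raid1d so
 * the read can be retried on another mirror.
 */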
static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

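/*
 * Completion handler for a normal write.  A failed barrier write is
 * queued for retry without the barrier; behind-writes acknowledge the
 * master bio once all non-writemostly devices are safe; the request as
 * a whole completes when writes to every mirror have finished.
 */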
static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

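/*
 * Unplug the queues of all member devices that have I/O pending so
 * their queued requests get issued.
 */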
static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

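/*
 * Report whether the array looks congested.  Until read_balance prefers
 * non-congested targets, a single congested member is enough (note the
 * '|| 1' below).
 */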
static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_write_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}


static int flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 * We return 1 if any requests were actually submitted.
	 */
	int rv = 0;

	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
		rv = 1;
	} else
		spin_unlock_irq(&conf->device_lock);
	return rv;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    ({ flush_pending_writes(conf);
			       raid1_unplug(conf->mddev->queue); }));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
					GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
			kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}

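/*
 * Entry point for normal I/O.  Reads go to a single mirror chosen by
 * read_balance(); writes are cloned to every working mirror and queued
 * on pending_bio_list until the bitmap has been updated.
 */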
static int make_request(struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const int do_sync = bio_sync(bio);
	int cpu, do_barriers;
	mdk_rdev_t *blocked_rdev;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0;  i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);

		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers | do_sync;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* In case raid1d snuck into freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
		"raid1: Operation continuing on %d devices.\n",
		bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}


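/*
 * Add a spare to the array, using the first free mirror slot (or the
 * slot recorded in rdev->raid_disk when re-adding a former member).
 */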
static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	mirror_info_t *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	for (mirror = first; mirror <= last; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return err;
}

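/*
 * Remove an idle device from the array.  Removal is refused while the
 * device is in_sync or has I/O pending, or while a working device
 * would still be needed as a recovery source.
 */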
static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


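/*
 * Completion handlers for resync/recovery I/O.  A finished read hands
 * the r1bio back to raid1d (sync_request_write does the real work); a
 * finished write updates the bitmap on error and completes the sync
 * chunk once all writes are in.
 */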
static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
}

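/*
 * Called by raid1d once the resync reads for an r1bio have completed.
 * For check/repair the copies are compared and rewritten if they
 * differ; failed reads are retried synchronously from other mirrors;
 * finally the data is written out to the devices that need it.
 */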
1240static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1241{
1242 conf_t *conf = mddev_to_conf(mddev);
1243 int i;
1244 int disks = conf->raid_disks;
1245 struct bio *bio, *wbio;
1246
1247 bio = r1_bio->bios[r1_bio->read_disk];
1248
NeilBrown69382e82006-01-06 00:20:22 -08001249
NeilBrownd11c1712006-01-06 00:20:26 -08001250 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1251 /* We have read all readable devices. If we haven't
1252 * got the block, then there is no hope left.
1253 * If we have, then we want to do a comparison
1254 * and skip the write if everything is the same.
1255 * If any blocks failed to read, then we need to
1256 * attempt an over-write
1257 */
1258 int primary;
1259 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1260 for (i=0; i<mddev->raid_disks; i++)
1261 if (r1_bio->bios[i]->bi_end_io == end_sync_read)
1262 md_error(mddev, conf->mirrors[i].rdev);
1263
1264 md_done_sync(mddev, r1_bio->sectors, 1);
1265 put_buf(r1_bio);
1266 return;
1267 }
1268 for (primary=0; primary<mddev->raid_disks; primary++)
1269 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1270 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1271 r1_bio->bios[primary]->bi_end_io = NULL;
NeilBrown03c902e2006-01-06 00:20:46 -08001272 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
NeilBrownd11c1712006-01-06 00:20:26 -08001273 break;
1274 }
1275 r1_bio->read_disk = primary;
1276 for (i=0; i<mddev->raid_disks; i++)
Mike Accettaed456662007-06-16 10:16:07 -07001277 if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
NeilBrownd11c1712006-01-06 00:20:26 -08001278 int j;
1279 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
1280 struct bio *pbio = r1_bio->bios[primary];
1281 struct bio *sbio = r1_bio->bios[i];
Mike Accettaed456662007-06-16 10:16:07 -07001282
1283 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1284 for (j = vcnt; j-- ; ) {
1285 struct page *p, *s;
1286 p = pbio->bi_io_vec[j].bv_page;
1287 s = sbio->bi_io_vec[j].bv_page;
1288 if (memcmp(page_address(p),
1289 page_address(s),
1290 PAGE_SIZE))
1291 break;
1292 }
1293 } else
1294 j = 0;
NeilBrownd11c1712006-01-06 00:20:26 -08001295 if (j >= 0)
1296 mddev->resync_mismatches += r1_bio->sectors;
NeilBrowncf7a4412007-10-16 23:30:55 -07001297 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1298 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
NeilBrownd11c1712006-01-06 00:20:26 -08001299 sbio->bi_end_io = NULL;
NeilBrown03c902e2006-01-06 00:20:46 -08001300 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1301 } else {
NeilBrownd11c1712006-01-06 00:20:26 -08001302 /* fixup the bio for reuse */
NeilBrown698b18c2008-05-23 13:04:35 -07001303 int size;
NeilBrownd11c1712006-01-06 00:20:26 -08001304 sbio->bi_vcnt = vcnt;
1305 sbio->bi_size = r1_bio->sectors << 9;
1306 sbio->bi_idx = 0;
1307 sbio->bi_phys_segments = 0;
NeilBrownd11c1712006-01-06 00:20:26 -08001308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1309 sbio->bi_flags |= 1 << BIO_UPTODATE;
1310 sbio->bi_next = NULL;
1311 sbio->bi_sector = r1_bio->sector +
1312 conf->mirrors[i].rdev->data_offset;
1313 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
NeilBrown698b18c2008-05-23 13:04:35 -07001314 size = sbio->bi_size;
1315 for (j = 0; j < vcnt ; j++) {
1316 struct bio_vec *bi;
1317 bi = &sbio->bi_io_vec[j];
1318 bi->bv_offset = 0;
1319 if (size > PAGE_SIZE)
1320 bi->bv_len = PAGE_SIZE;
1321 else
1322 bi->bv_len = size;
1323 size -= PAGE_SIZE;
1324 memcpy(page_address(bi->bv_page),
NeilBrown3eda22d2007-01-26 00:57:01 -08001325 page_address(pbio->bi_io_vec[j].bv_page),
1326 PAGE_SIZE);
NeilBrown698b18c2008-05-23 13:04:35 -07001327 }
NeilBrown3eda22d2007-01-26 00:57:01 -08001328
NeilBrownd11c1712006-01-06 00:20:26 -08001329 }
1330 }
1331 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
NeilBrown69382e82006-01-06 00:20:22 -08001333 /* ouch - failed to read all of that.
1334 * Try some synchronous reads of other devices to get
1335 * good data, much like with normal read errors. Only
NeilBrownddac7c72006-08-31 21:27:36 -07001336 * read into the pages we already have so we don't
NeilBrown69382e82006-01-06 00:20:22 -08001337 * need to re-issue the read request.
1338 * We don't need to freeze the array, because being in an
1339 * active sync request, there is no normal IO, and
1340 * no overlapping syncs.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 */
NeilBrown69382e82006-01-06 00:20:22 -08001342 sector_t sect = r1_bio->sector;
1343 int sectors = r1_bio->sectors;
1344 int idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
NeilBrown69382e82006-01-06 00:20:22 -08001346 while(sectors) {
1347 int s = sectors;
1348 int d = r1_bio->read_disk;
1349 int success = 0;
1350 mdk_rdev_t *rdev;
1351
1352 if (s > (PAGE_SIZE>>9))
1353 s = PAGE_SIZE >> 9;
1354 do {
1355 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
NeilBrownddac7c72006-08-31 21:27:36 -07001356 /* No rcu protection needed here devices
1357 * can only be removed when no resync is
1358 * active, and resync is currently active
1359 */
NeilBrown69382e82006-01-06 00:20:22 -08001360 rdev = conf->mirrors[d].rdev;
1361 if (sync_page_io(rdev->bdev,
1362 sect + rdev->data_offset,
1363 s<<9,
1364 bio->bi_io_vec[idx].bv_page,
1365 READ)) {
1366 success = 1;
1367 break;
1368 }
1369 }
1370 d++;
1371 if (d == conf->raid_disks)
1372 d = 0;
1373 } while (!success && d != r1_bio->read_disk);
1374
1375 if (success) {
NeilBrown097426f2006-01-06 00:20:37 -08001376 int start = d;
NeilBrown69382e82006-01-06 00:20:22 -08001377 /* write it back and re-read */
1378 set_bit(R1BIO_Uptodate, &r1_bio->state);
1379 while (d != r1_bio->read_disk) {
1380 if (d == 0)
1381 d = conf->raid_disks;
1382 d--;
1383 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1384 continue;
1385 rdev = conf->mirrors[d].rdev;
NeilBrown4dbcdc72006-01-06 00:20:52 -08001386 atomic_add(s, &rdev->corrected_errors);
NeilBrown69382e82006-01-06 00:20:22 -08001387 if (sync_page_io(rdev->bdev,
1388 sect + rdev->data_offset,
1389 s<<9,
1390 bio->bi_io_vec[idx].bv_page,
NeilBrown097426f2006-01-06 00:20:37 -08001391 WRITE) == 0)
1392 md_error(mddev, rdev);
1393 }
1394 d = start;
1395 while (d != r1_bio->read_disk) {
1396 if (d == 0)
1397 d = conf->raid_disks;
1398 d--;
1399 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1400 continue;
1401 rdev = conf->mirrors[d].rdev;
1402 if (sync_page_io(rdev->bdev,
NeilBrown69382e82006-01-06 00:20:22 -08001403 sect + rdev->data_offset,
1404 s<<9,
1405 bio->bi_io_vec[idx].bv_page,
NeilBrown097426f2006-01-06 00:20:37 -08001406 READ) == 0)
NeilBrown69382e82006-01-06 00:20:22 -08001407 md_error(mddev, rdev);
NeilBrown69382e82006-01-06 00:20:22 -08001408 }
1409 } else {
1410 char b[BDEVNAME_SIZE];
1411 /* Cannot read from anywhere, array is toast */
1412 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1413 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
1414 " for block %llu\n",
1415 bdevname(bio->bi_bdev,b),
1416 (unsigned long long)r1_bio->sector);
1417 md_done_sync(mddev, r1_bio->sectors, 0);
1418 put_buf(r1_bio);
1419 return;
1420 }
1421 sectors -= s;
1422 sect += s;
1423 idx ++;
1424 }
1425 }
NeilBrownd11c1712006-01-06 00:20:26 -08001426
1427 /*
1428 * schedule writes
1429 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 atomic_set(&r1_bio->remaining, 1);
1431 for (i = 0; i < disks ; i++) {
1432 wbio = r1_bio->bios[i];
NeilBrown3e198f72006-01-06 00:20:21 -08001433 if (wbio->bi_end_io == NULL ||
1434 (wbio->bi_end_io == end_sync_read &&
1435 (i == r1_bio->read_disk ||
1436 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 continue;
1438
NeilBrown3e198f72006-01-06 00:20:21 -08001439 wbio->bi_rw = WRITE;
1440 wbio->bi_end_io = end_sync_write;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 atomic_inc(&r1_bio->remaining);
1442 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
NeilBrown191ea9b2005-06-21 17:17:23 -07001443
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 generic_make_request(wbio);
1445 }
1446
1447 if (atomic_dec_and_test(&r1_bio->remaining)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001448 /* if we're here, all write(s) have completed, so clean up */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 md_done_sync(mddev, r1_bio->sectors, 1);
1450 put_buf(r1_bio);
1451 }
1452}
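/*
 * Illustrative sketch, not part of the driver: when a resync read fails,
 * sync_request_write() above falls back to "find any mirror that can still
 * return the block, write that data back to the others, then re-read to
 * verify", much as fix_read_error() does for normal reads.  A minimal
 * user-space model of that loop, with invented mirror_read()/mirror_write()
 * helpers standing in for sync_page_io(), could look like this:
 *
 *	#include <stdbool.h>
 *	#include <string.h>
 *
 *	#define NMIRROR	4
 *	#define SECTOR	512
 *
 *	struct mirror { bool failing; unsigned char data[SECTOR]; };
 *
 *	static bool mirror_read(struct mirror *m, unsigned char *buf)
 *	{
 *		if (m->failing)
 *			return false;
 *		memcpy(buf, m->data, SECTOR);
 *		return true;
 *	}
 *
 *	static bool mirror_write(struct mirror *m, const unsigned char *buf)
 *	{
 *		if (m->failing)
 *			return false;
 *		memcpy(m->data, buf, SECTOR);
 *		return true;
 *	}
 *
 *	// Returns true if the block could be recovered from some mirror.
 *	static bool recover_block(struct mirror *mirrors, int read_disk)
 *	{
 *		unsigned char good[SECTOR], verify[SECTOR];
 *		int d = read_disk;
 *		bool found = false;
 *
 *		do {				// hunt for any readable copy
 *			if (mirror_read(&mirrors[d], good)) {
 *				found = true;
 *				break;
 *			}
 *			d = (d + 1) % NMIRROR;
 *		} while (d != read_disk);
 *
 *		if (!found)
 *			return false;		// nowhere left to read from
 *
 *		for (int i = 0; i < NMIRROR; i++) {
 *			if (i == d)
 *				continue;	// skip the copy we read from
 *			if (mirror_write(&mirrors[i], good))
 *				mirror_read(&mirrors[i], verify); // re-read to check
 *		}
 *		return true;
 *	}
 */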
1453
1454/*
1455 * This is a kernel thread which:
1456 *
1457 * 1. Retries failed read operations on working mirrors.
	1458 * 2. Updates the raid superblock when problems are encountered.
	1459 * 3. Performs writes following reads for array synchronising.
1460 */
1461
NeilBrown867868f2006-10-03 01:15:51 -07001462static void fix_read_error(conf_t *conf, int read_disk,
1463 sector_t sect, int sectors)
1464{
1465 mddev_t *mddev = conf->mddev;
1466 while(sectors) {
1467 int s = sectors;
1468 int d = read_disk;
1469 int success = 0;
1470 int start;
1471 mdk_rdev_t *rdev;
1472
1473 if (s > (PAGE_SIZE>>9))
1474 s = PAGE_SIZE >> 9;
1475
1476 do {
1477 /* Note: no rcu protection needed here
1478 * as this is synchronous in the raid1d thread
1479 * which is the thread that might remove
1480 * a device. If raid1d ever becomes multi-threaded....
1481 */
1482 rdev = conf->mirrors[d].rdev;
1483 if (rdev &&
1484 test_bit(In_sync, &rdev->flags) &&
1485 sync_page_io(rdev->bdev,
1486 sect + rdev->data_offset,
1487 s<<9,
1488 conf->tmppage, READ))
1489 success = 1;
1490 else {
1491 d++;
1492 if (d == conf->raid_disks)
1493 d = 0;
1494 }
1495 } while (!success && d != read_disk);
1496
1497 if (!success) {
1498 /* Cannot read from anywhere -- bye bye array */
1499 md_error(mddev, conf->mirrors[read_disk].rdev);
1500 break;
1501 }
1502 /* write it back and re-read */
1503 start = d;
1504 while (d != read_disk) {
1505 if (d==0)
1506 d = conf->raid_disks;
1507 d--;
1508 rdev = conf->mirrors[d].rdev;
1509 if (rdev &&
1510 test_bit(In_sync, &rdev->flags)) {
1511 if (sync_page_io(rdev->bdev,
1512 sect + rdev->data_offset,
1513 s<<9, conf->tmppage, WRITE)
1514 == 0)
1515 /* Well, this device is dead */
1516 md_error(mddev, rdev);
1517 }
1518 }
1519 d = start;
1520 while (d != read_disk) {
1521 char b[BDEVNAME_SIZE];
1522 if (d==0)
1523 d = conf->raid_disks;
1524 d--;
1525 rdev = conf->mirrors[d].rdev;
1526 if (rdev &&
1527 test_bit(In_sync, &rdev->flags)) {
1528 if (sync_page_io(rdev->bdev,
1529 sect + rdev->data_offset,
1530 s<<9, conf->tmppage, READ)
1531 == 0)
1532 /* Well, this device is dead */
1533 md_error(mddev, rdev);
1534 else {
1535 atomic_add(s, &rdev->corrected_errors);
1536 printk(KERN_INFO
1537 "raid1:%s: read error corrected "
1538 "(%d sectors at %llu on %s)\n",
1539 mdname(mddev), s,
Randy Dunlap969b7552006-10-28 10:38:32 -07001540 (unsigned long long)(sect +
1541 rdev->data_offset),
NeilBrown867868f2006-10-03 01:15:51 -07001542 bdevname(rdev->bdev, b));
1543 }
1544 }
1545 }
1546 sectors -= s;
1547 sect += s;
1548 }
1549}
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551static void raid1d(mddev_t *mddev)
1552{
1553 r1bio_t *r1_bio;
1554 struct bio *bio;
1555 unsigned long flags;
1556 conf_t *conf = mddev_to_conf(mddev);
1557 struct list_head *head = &conf->retry_list;
1558 int unplug=0;
1559 mdk_rdev_t *rdev;
1560
1561 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
1563 for (;;) {
1564 char b[BDEVNAME_SIZE];
NeilBrowna35e63e2008-03-04 14:29:29 -08001565
1566 unplug += flush_pending_writes(conf);
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrowna35e63e2008-03-04 14:29:29 -08001569 if (list_empty(head)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001570 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 break;
NeilBrowna35e63e2008-03-04 14:29:29 -08001572 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1574 list_del(head->prev);
NeilBrownddaf22a2006-01-06 00:20:19 -08001575 conf->nr_queued--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 spin_unlock_irqrestore(&conf->device_lock, flags);
1577
1578 mddev = r1_bio->mddev;
1579 conf = mddev_to_conf(mddev);
1580 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1581 sync_request_write(mddev, r1_bio);
1582 unplug = 1;
NeilBrowna9701a32005-11-08 21:39:34 -08001583 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
1584 /* some requests in the r1bio were BIO_RW_BARRIER
NeilBrownbea27712006-05-01 12:15:46 -07001585 * requests which failed with -EOPNOTSUPP. Hohumm..
NeilBrowna9701a32005-11-08 21:39:34 -08001586 * Better resubmit without the barrier.
1587 * We know which devices to resubmit for, because
1588 * all others have had their bios[] entry cleared.
NeilBrown5e7dd2a2006-05-01 12:15:47 -07001589 * We already have a nr_pending reference on these rdevs.
NeilBrowna9701a32005-11-08 21:39:34 -08001590 */
1591 int i;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001592 const int do_sync = bio_sync(r1_bio->master_bio);
NeilBrowna9701a32005-11-08 21:39:34 -08001593 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1594 clear_bit(R1BIO_Barrier, &r1_bio->state);
1595 for (i=0; i < conf->raid_disks; i++)
NeilBrown2f889122006-03-27 01:18:19 -08001596 if (r1_bio->bios[i])
1597 atomic_inc(&r1_bio->remaining);
1598 for (i=0; i < conf->raid_disks; i++)
NeilBrowna9701a32005-11-08 21:39:34 -08001599 if (r1_bio->bios[i]) {
1600 struct bio_vec *bvec;
1601 int j;
1602
1603 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1604 /* copy pages from the failed bio, as
1605 * this might be a write-behind device */
1606 __bio_for_each_segment(bvec, bio, j, 0)
1607 bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
1608 bio_put(r1_bio->bios[i]);
1609 bio->bi_sector = r1_bio->sector +
1610 conf->mirrors[i].rdev->data_offset;
1611 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1612 bio->bi_end_io = raid1_end_write_request;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001613 bio->bi_rw = WRITE | do_sync;
NeilBrowna9701a32005-11-08 21:39:34 -08001614 bio->bi_private = r1_bio;
1615 r1_bio->bios[i] = bio;
1616 generic_make_request(bio);
1617 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 } else {
1619 int disk;
NeilBrownddaf22a2006-01-06 00:20:19 -08001620
1621 /* we got a read error. Maybe the drive is bad. Maybe just
	1622	 * the block is bad and we can fix it.
1623 * We freeze all other IO, and try reading the block from
1624 * other devices. When we find one, we re-write
	1625	 * and check that it fixes the read error.
1626 * This is all done synchronously while the array is
1627 * frozen
1628 */
NeilBrown867868f2006-10-03 01:15:51 -07001629 if (mddev->ro == 0) {
1630 freeze_array(conf);
1631 fix_read_error(conf, r1_bio->read_disk,
1632 r1_bio->sector,
1633 r1_bio->sectors);
1634 unfreeze_array(conf);
NeilBrownddaf22a2006-01-06 00:20:19 -08001635 }
1636
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 bio = r1_bio->bios[r1_bio->read_disk];
1638 if ((disk=read_balance(conf, r1_bio)) == -1) {
1639 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1640 " read error for block %llu\n",
1641 bdevname(bio->bi_bdev,b),
1642 (unsigned long long)r1_bio->sector);
1643 raid_end_bio_io(r1_bio);
1644 } else {
Lars Ellenberge3881a62007-01-10 23:15:37 -08001645 const int do_sync = bio_sync(r1_bio->master_bio);
NeilBrowncf30a472006-01-06 00:20:23 -08001646 r1_bio->bios[r1_bio->read_disk] =
1647 mddev->ro ? IO_BLOCKED : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 r1_bio->read_disk = disk;
1649 bio_put(bio);
1650 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1651 r1_bio->bios[r1_bio->read_disk] = bio;
1652 rdev = conf->mirrors[disk].rdev;
1653 if (printk_ratelimit())
1654 printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
1655 " another mirror\n",
1656 bdevname(rdev->bdev,b),
1657 (unsigned long long)r1_bio->sector);
1658 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1659 bio->bi_bdev = rdev->bdev;
1660 bio->bi_end_io = raid1_end_read_request;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001661 bio->bi_rw = READ | do_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 bio->bi_private = r1_bio;
1663 unplug = 1;
1664 generic_make_request(bio);
1665 }
1666 }
1667 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 if (unplug)
1669 unplug_slaves(mddev);
1670}
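/*
 * Illustrative sketch, not part of the driver: stripped of the md specifics,
 * raid1d() above is a loop that drains a lock-protected retry list and
 * services one entry at a time outside the lock.  The same pattern in
 * user-space C, with a pthread mutex standing in for conf->device_lock and
 * an invented handle() callback, might look like this:
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	struct retry_item {
 *		struct retry_item *next;
 *		void (*handle)(struct retry_item *);
 *	};
 *
 *	static struct retry_item *retry_head;
 *	static pthread_mutex_t retry_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void drain_retries(void)
 *	{
 *		for (;;) {
 *			struct retry_item *item;
 *
 *			pthread_mutex_lock(&retry_lock);
 *			item = retry_head;
 *			if (!item) {
 *				pthread_mutex_unlock(&retry_lock);
 *				break;			// nothing left queued
 *			}
 *			retry_head = item->next;	// list_del() equivalent
 *			pthread_mutex_unlock(&retry_lock);
 *
 *			item->handle(item);	// work is done outside the lock
 *			free(item);
 *		}
 *	}
 */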
1671
1672
1673static int init_resync(conf_t *conf)
1674{
1675 int buffs;
1676
1677 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
Eric Sesterhenn9e77c482006-04-01 01:08:49 +02001678 BUG_ON(conf->r1buf_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1680 conf->poolinfo);
1681 if (!conf->r1buf_pool)
1682 return -ENOMEM;
1683 conf->next_resync = 0;
1684 return 0;
1685}
1686
1687/*
1688 * perform a "sync" on one "block"
1689 *
1690 * We need to make sure that no normal I/O request - particularly write
1691 * requests - conflict with active sync requests.
1692 *
1693 * This is achieved by tracking pending requests and a 'barrier' concept
1694 * that can be installed to exclude normal IO requests.
1695 */
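/*
 * Illustrative sketch, not part of the driver: the 'barrier' described above
 * can be modelled as a count of in-flight normal requests plus a flag owned
 * by resync.  A much-simplified user-space analogue of raise_barrier(),
 * lower_barrier(), wait_barrier() and allow_barrier(), using a pthread mutex
 * and condition variable in place of conf->resync_lock and conf->wait_barrier:
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
 *	static int  nr_pending;		// normal requests in flight
 *	static bool barrier_up;		// a resync pass owns the array
 *
 *	static void wait_barrier(void)	// before issuing normal I/O
 *	{
 *		pthread_mutex_lock(&lock);
 *		while (barrier_up)
 *			pthread_cond_wait(&cond, &lock);
 *		nr_pending++;
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	static void allow_barrier(void)	// when that normal I/O completes
 *	{
 *		pthread_mutex_lock(&lock);
 *		if (--nr_pending == 0)
 *			pthread_cond_broadcast(&cond);
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	static void raise_barrier(void)	// start of a resync request
 *	{
 *		pthread_mutex_lock(&lock);
 *		while (barrier_up || nr_pending > 0)
 *			pthread_cond_wait(&cond, &lock);
 *		barrier_up = true;
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	static void lower_barrier(void)	// end of that resync request
 *	{
 *		pthread_mutex_lock(&lock);
 *		barrier_up = false;
 *		pthread_cond_broadcast(&cond);
 *		pthread_mutex_unlock(&lock);
 *	}
 */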
1696
NeilBrown57afd892005-06-21 17:17:13 -07001697static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698{
1699 conf_t *conf = mddev_to_conf(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 r1bio_t *r1_bio;
1701 struct bio *bio;
1702 sector_t max_sector, nr_sectors;
NeilBrown3e198f72006-01-06 00:20:21 -08001703 int disk = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 int i;
NeilBrown3e198f72006-01-06 00:20:21 -08001705 int wonly = -1;
1706 int write_targets = 0, read_targets = 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001707 int sync_blocks;
NeilBrowne3b97032005-08-04 12:53:34 -07001708 int still_degraded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
1710 if (!conf->r1buf_pool)
NeilBrown191ea9b2005-06-21 17:17:23 -07001711 {
1712/*
1713 printk("sync start - bitmap %p\n", mddev->bitmap);
1714*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07001716 return 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001717 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 max_sector = mddev->size << 1;
1720 if (sector_nr >= max_sector) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001721 /* If we aborted, we need to abort the
1722 * sync on the 'current' bitmap chunk (there will
	1723		 * only be one in raid1 resync).
	1724		 * We can find the current address in mddev->curr_resync
1725 */
NeilBrown6a806c52005-07-15 03:56:35 -07001726 if (mddev->curr_resync < max_sector) /* aborted */
1727 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
NeilBrown191ea9b2005-06-21 17:17:23 -07001728 &sync_blocks, 1);
NeilBrown6a806c52005-07-15 03:56:35 -07001729 else /* completed sync */
NeilBrown191ea9b2005-06-21 17:17:23 -07001730 conf->fullsync = 0;
NeilBrown6a806c52005-07-15 03:56:35 -07001731
1732 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 close_sync(conf);
1734 return 0;
1735 }
1736
NeilBrown07d84d102006-06-26 00:27:56 -07001737 if (mddev->bitmap == NULL &&
1738 mddev->recovery_cp == MaxSector &&
NeilBrown6394cca2006-08-27 01:23:50 -07001739 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
NeilBrown07d84d102006-06-26 00:27:56 -07001740 conf->fullsync == 0) {
1741 *skipped = 1;
1742 return max_sector - sector_nr;
1743 }
NeilBrown6394cca2006-08-27 01:23:50 -07001744 /* before building a request, check if we can skip these blocks..
	1745	 * This call to bitmap_start_sync doesn't actually record anything
1746 */
NeilBrowne3b97032005-08-04 12:53:34 -07001747 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
NeilBrowne5de4852005-11-08 21:39:38 -08001748 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001749 /* We can skip this block, and probably several more */
1750 *skipped = 1;
1751 return sync_blocks;
1752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 /*
NeilBrown17999be2006-01-06 00:20:12 -08001754 * If there is non-resync activity waiting for a turn,
1755 * and resync is going fast enough,
	1756	 * then let it through before starting on this new sync request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 */
NeilBrown17999be2006-01-06 00:20:12 -08001758 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 msleep_interruptible(1000);
NeilBrown17999be2006-01-06 00:20:12 -08001760
NeilBrownb47490c2008-02-06 01:39:50 -08001761 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
NeilBrown17999be2006-01-06 00:20:12 -08001762 raise_barrier(conf);
1763
1764 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
NeilBrown3e198f72006-01-06 00:20:21 -08001767 rcu_read_lock();
1768 /*
1769 * If we get a correctably read error during resync or recovery,
1770 * we might want to read from a different device. So we
1771 * flag all drives that could conceivably be read from for READ,
1772 * and any others (which will be non-In_sync devices) for WRITE.
1773 * If a read fails, we try reading from something else for which READ
1774 * is OK.
1775 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 r1_bio->mddev = mddev;
1778 r1_bio->sector = sector_nr;
NeilBrown191ea9b2005-06-21 17:17:23 -07001779 r1_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 set_bit(R1BIO_IsSync, &r1_bio->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781
1782 for (i=0; i < conf->raid_disks; i++) {
NeilBrown3e198f72006-01-06 00:20:21 -08001783 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 bio = r1_bio->bios[i];
1785
1786 /* take from bio_init */
1787 bio->bi_next = NULL;
1788 bio->bi_flags |= 1 << BIO_UPTODATE;
NeilBrown802ba062006-12-13 00:34:13 -08001789 bio->bi_rw = READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 bio->bi_vcnt = 0;
1791 bio->bi_idx = 0;
1792 bio->bi_phys_segments = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 bio->bi_size = 0;
1794 bio->bi_end_io = NULL;
1795 bio->bi_private = NULL;
1796
NeilBrown3e198f72006-01-06 00:20:21 -08001797 rdev = rcu_dereference(conf->mirrors[i].rdev);
1798 if (rdev == NULL ||
1799 test_bit(Faulty, &rdev->flags)) {
NeilBrowne3b97032005-08-04 12:53:34 -07001800 still_degraded = 1;
1801 continue;
NeilBrown3e198f72006-01-06 00:20:21 -08001802 } else if (!test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 bio->bi_rw = WRITE;
1804 bio->bi_end_io = end_sync_write;
1805 write_targets ++;
NeilBrown3e198f72006-01-06 00:20:21 -08001806 } else {
1807 /* may need to read from here */
1808 bio->bi_rw = READ;
1809 bio->bi_end_io = end_sync_read;
1810 if (test_bit(WriteMostly, &rdev->flags)) {
1811 if (wonly < 0)
1812 wonly = i;
1813 } else {
1814 if (disk < 0)
1815 disk = i;
1816 }
1817 read_targets++;
1818 }
1819 atomic_inc(&rdev->nr_pending);
1820 bio->bi_sector = sector_nr + rdev->data_offset;
1821 bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 bio->bi_private = r1_bio;
1823 }
NeilBrown3e198f72006-01-06 00:20:21 -08001824 rcu_read_unlock();
1825 if (disk < 0)
1826 disk = wonly;
1827 r1_bio->read_disk = disk;
NeilBrown191ea9b2005-06-21 17:17:23 -07001828
NeilBrown3e198f72006-01-06 00:20:21 -08001829 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1830 /* extra read targets are also write targets */
1831 write_targets += read_targets-1;
1832
1833 if (write_targets == 0 || read_targets == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 /* There is nowhere to write, so all non-sync
1835 * drives must be failed - so we are finished
1836 */
NeilBrown57afd892005-06-21 17:17:13 -07001837 sector_t rv = max_sector - sector_nr;
1838 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 put_buf(r1_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return rv;
1841 }
1842
NeilBrownc6207272008-02-06 01:39:52 -08001843 if (max_sector > mddev->resync_max)
1844 max_sector = mddev->resync_max; /* Don't do IO beyond here */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 nr_sectors = 0;
NeilBrown289e99e2005-06-21 17:17:24 -07001846 sync_blocks = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 do {
1848 struct page *page;
1849 int len = PAGE_SIZE;
1850 if (sector_nr + (len>>9) > max_sector)
1851 len = (max_sector - sector_nr) << 9;
1852 if (len == 0)
1853 break;
NeilBrown6a806c52005-07-15 03:56:35 -07001854 if (sync_blocks == 0) {
1855 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
NeilBrowne5de4852005-11-08 21:39:38 -08001856 &sync_blocks, still_degraded) &&
1857 !conf->fullsync &&
1858 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
NeilBrown6a806c52005-07-15 03:56:35 -07001859 break;
Eric Sesterhenn9e77c482006-04-01 01:08:49 +02001860 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
NeilBrown6a806c52005-07-15 03:56:35 -07001861 if (len > (sync_blocks<<9))
1862 len = sync_blocks<<9;
NeilBrownab7a30c2005-06-21 17:17:23 -07001863 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001864
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 for (i=0 ; i < conf->raid_disks; i++) {
1866 bio = r1_bio->bios[i];
1867 if (bio->bi_end_io) {
NeilBrownd11c1712006-01-06 00:20:26 -08001868 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 if (bio_add_page(bio, page, len, 0) == 0) {
1870 /* stop here */
NeilBrownd11c1712006-01-06 00:20:26 -08001871 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 while (i > 0) {
1873 i--;
1874 bio = r1_bio->bios[i];
NeilBrown6a806c52005-07-15 03:56:35 -07001875 if (bio->bi_end_io==NULL)
1876 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 /* remove last page from this bio */
1878 bio->bi_vcnt--;
1879 bio->bi_size -= len;
1880 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
1881 }
1882 goto bio_full;
1883 }
1884 }
1885 }
1886 nr_sectors += len>>9;
1887 sector_nr += len>>9;
NeilBrown191ea9b2005-06-21 17:17:23 -07001888 sync_blocks -= (len>>9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1890 bio_full:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 r1_bio->sectors = nr_sectors;
1892
NeilBrownd11c1712006-01-06 00:20:26 -08001893 /* For a user-requested sync, we read all readable devices and do a
1894 * compare
1895 */
1896 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1897 atomic_set(&r1_bio->remaining, read_targets);
1898 for (i=0; i<conf->raid_disks; i++) {
1899 bio = r1_bio->bios[i];
1900 if (bio->bi_end_io == end_sync_read) {
NeilBrownddac7c72006-08-31 21:27:36 -07001901 md_sync_acct(bio->bi_bdev, nr_sectors);
NeilBrownd11c1712006-01-06 00:20:26 -08001902 generic_make_request(bio);
1903 }
1904 }
1905 } else {
1906 atomic_set(&r1_bio->remaining, 1);
1907 bio = r1_bio->bios[r1_bio->read_disk];
NeilBrownddac7c72006-08-31 21:27:36 -07001908 md_sync_acct(bio->bi_bdev, nr_sectors);
NeilBrownd11c1712006-01-06 00:20:26 -08001909 generic_make_request(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
NeilBrownd11c1712006-01-06 00:20:26 -08001911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 return nr_sectors;
1913}
1914
1915static int run(mddev_t *mddev)
1916{
1917 conf_t *conf;
1918 int i, j, disk_idx;
1919 mirror_info_t *disk;
1920 mdk_rdev_t *rdev;
1921 struct list_head *tmp;
1922
1923 if (mddev->level != 1) {
1924 printk("raid1: %s: raid level not set to mirroring (%d)\n",
1925 mdname(mddev), mddev->level);
1926 goto out;
1927 }
NeilBrownf6705572006-03-27 01:18:11 -08001928 if (mddev->reshape_position != MaxSector) {
1929 printk("raid1: %s: reshape_position set but not supported\n",
1930 mdname(mddev));
1931 goto out;
1932 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 /*
1934 * copy the already verified devices into our private RAID1
1935 * bookkeeping area. [whatever we allocate in run(),
1936 * should be freed in stop()]
1937 */
NeilBrown9ffae0c2006-01-06 00:20:32 -08001938 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 mddev->private = conf;
1940 if (!conf)
1941 goto out_no_mem;
1942
NeilBrown9ffae0c2006-01-06 00:20:32 -08001943 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 GFP_KERNEL);
1945 if (!conf->mirrors)
1946 goto out_no_mem;
1947
NeilBrownddaf22a2006-01-06 00:20:19 -08001948 conf->tmppage = alloc_page(GFP_KERNEL);
1949 if (!conf->tmppage)
1950 goto out_no_mem;
1951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1953 if (!conf->poolinfo)
1954 goto out_no_mem;
1955 conf->poolinfo->mddev = mddev;
1956 conf->poolinfo->raid_disks = mddev->raid_disks;
1957 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1958 r1bio_pool_free,
1959 conf->poolinfo);
1960 if (!conf->r1bio_pool)
1961 goto out_no_mem;
1962
Neil Browne7e72bf2008-05-14 16:05:54 -07001963 spin_lock_init(&conf->device_lock);
1964 mddev->queue->queue_lock = &conf->device_lock;
1965
NeilBrownd089c6a2008-02-06 01:39:59 -08001966 rdev_for_each(rdev, tmp, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 disk_idx = rdev->raid_disk;
1968 if (disk_idx >= mddev->raid_disks
1969 || disk_idx < 0)
1970 continue;
1971 disk = conf->mirrors + disk_idx;
1972
1973 disk->rdev = rdev;
1974
1975 blk_queue_stack_limits(mddev->queue,
1976 rdev->bdev->bd_disk->queue);
1977 /* as we don't honour merge_bvec_fn, we must never risk
1978 * violating it, so limit ->max_sector to one PAGE, as
1979 * a one page request is never in violation.
1980 */
1981 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1982 mddev->queue->max_sectors > (PAGE_SIZE>>9))
1983 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1984
1985 disk->head_position = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 }
1987 conf->raid_disks = mddev->raid_disks;
1988 conf->mddev = mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 INIT_LIST_HEAD(&conf->retry_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991 spin_lock_init(&conf->resync_lock);
NeilBrown17999be2006-01-06 00:20:12 -08001992 init_waitqueue_head(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
NeilBrown191ea9b2005-06-21 17:17:23 -07001994 bio_list_init(&conf->pending_bio_list);
1995 bio_list_init(&conf->flushing_bio_list);
1996
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
1998 mddev->degraded = 0;
1999 for (i = 0; i < conf->raid_disks; i++) {
2000
2001 disk = conf->mirrors + i;
2002
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002003 if (!disk->rdev ||
2004 !test_bit(In_sync, &disk->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 disk->head_position = 0;
2006 mddev->degraded++;
NeilBrown918f0232007-08-22 14:01:52 -07002007 if (disk->rdev)
2008 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 }
2010 }
NeilBrown11ce99e2006-10-03 01:15:52 -07002011 if (mddev->degraded == conf->raid_disks) {
2012 printk(KERN_ERR "raid1: no operational mirrors for %s\n",
2013 mdname(mddev));
2014 goto out_free_conf;
2015 }
2016 if (conf->raid_disks - mddev->degraded == 1)
2017 mddev->recovery_cp = MaxSector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
2019 /*
2020 * find the first working one and use it as a starting point
	2021	 * for read balancing.
2022 */
2023 for (j = 0; j < conf->raid_disks &&
2024 (!conf->mirrors[j].rdev ||
NeilBrownb2d444d2005-11-08 21:39:31 -08002025 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 /* nothing */;
2027 conf->last_used = j;
2028
2029
NeilBrown191ea9b2005-06-21 17:17:23 -07002030 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
2031 if (!mddev->thread) {
2032 printk(KERN_ERR
2033 "raid1: couldn't allocate thread for %s\n",
2034 mdname(mddev));
2035 goto out_free_conf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 }
NeilBrown191ea9b2005-06-21 17:17:23 -07002037
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 printk(KERN_INFO
2039 "raid1: raid set %s active with %d out of %d mirrors\n",
2040 mdname(mddev), mddev->raid_disks - mddev->degraded,
2041 mddev->raid_disks);
2042 /*
2043 * Ok, everything is just fine now
2044 */
Andre Nollf233ea52008-07-21 17:05:22 +10002045 mddev->array_sectors = mddev->size * 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
NeilBrown7a5febe2005-05-16 21:53:16 -07002047 mddev->queue->unplug_fn = raid1_unplug;
NeilBrown0d129222006-10-03 01:15:54 -07002048 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2049 mddev->queue->backing_dev_info.congested_data = mddev;
NeilBrown7a5febe2005-05-16 21:53:16 -07002050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 return 0;
2052
2053out_no_mem:
2054 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
2055 mdname(mddev));
2056
2057out_free_conf:
2058 if (conf) {
2059 if (conf->r1bio_pool)
2060 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002061 kfree(conf->mirrors);
NeilBrown1345b1d2006-01-06 00:20:40 -08002062 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002063 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 kfree(conf);
2065 mddev->private = NULL;
2066 }
2067out:
2068 return -EIO;
2069}
2070
2071static int stop(mddev_t *mddev)
2072{
2073 conf_t *conf = mddev_to_conf(mddev);
NeilBrown4b6d2872005-09-09 16:23:47 -07002074 struct bitmap *bitmap = mddev->bitmap;
2075 int behind_wait = 0;
2076
2077 /* wait for behind writes to complete */
2078 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2079 behind_wait++;
2080 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
2081 set_current_state(TASK_UNINTERRUPTIBLE);
2082 schedule_timeout(HZ); /* wait a second */
2083 /* need to kick something here to make sure I/O goes? */
2084 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
2086 md_unregister_thread(mddev->thread);
2087 mddev->thread = NULL;
2088 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2089 if (conf->r1bio_pool)
2090 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002091 kfree(conf->mirrors);
2092 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 kfree(conf);
2094 mddev->private = NULL;
2095 return 0;
2096}
2097
2098static int raid1_resize(mddev_t *mddev, sector_t sectors)
2099{
2100 /* no resync is happening, and there is enough space
2101 * on all devices, so we can resize.
2102 * We need to make sure resync covers any new space.
2103 * If the array is shrinking we should possibly wait until
2104 * any io in the removed space completes, but it hardly seems
2105 * worth it.
2106 */
Andre Nollf233ea52008-07-21 17:05:22 +10002107 mddev->array_sectors = sectors;
2108 set_capacity(mddev->gendisk, mddev->array_sectors);
Linus Torvalds44ce6292007-05-09 18:51:36 -07002109 mddev->changed = 1;
Andre Nollf233ea52008-07-21 17:05:22 +10002110 if (mddev->array_sectors / 2 > mddev->size &&
2111 mddev->recovery_cp == MaxSector) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 mddev->recovery_cp = mddev->size << 1;
2113 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2114 }
Andre Nollf233ea52008-07-21 17:05:22 +10002115 mddev->size = mddev->array_sectors / 2;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07002116 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 return 0;
2118}
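/*
 * Illustrative note, not part of the driver: raid1_resize() above mixes two
 * units; mddev->size appears to be kept in 1KiB units while the *_sectors
 * fields are 512-byte sectors, hence the "* 2", "<< 1" and "/ 2" conversions.
 * A tiny stand-alone sketch of the grow decision, with invented names (and
 * array_was_clean standing in for mddev->recovery_cp == MaxSector), might be:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct resize_result {
 *		uint64_t new_size_kib;	// what mddev->size would become
 *		bool	 resync_needed;	// newly added space must be resynced
 *	};
 *
 *	static struct resize_result resize_calc(uint64_t old_size_kib,
 *						uint64_t new_sectors,
 *						bool array_was_clean)
 *	{
 *		struct resize_result r;
 *
 *		r.new_size_kib	= new_sectors / 2;	// sectors to KiB
 *		r.resync_needed = array_was_clean &&
 *				  r.new_size_kib > old_size_kib;
 *		return r;
 *	}
 */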
2119
NeilBrown63c70c42006-03-27 01:18:13 -08002120static int raid1_reshape(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121{
2122 /* We need to:
2123 * 1/ resize the r1bio_pool
2124 * 2/ resize conf->mirrors
2125 *
2126 * We allocate a new r1bio_pool if we can.
2127 * Then raise a device barrier and wait until all IO stops.
2128 * Then resize conf->mirrors and swap in the new r1bio pool.
NeilBrown6ea9c072005-06-21 17:17:09 -07002129 *
2130 * At the same time, we "pack" the devices so that all the missing
2131 * devices have the higher raid_disk numbers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 */
2133 mempool_t *newpool, *oldpool;
2134 struct pool_info *newpoolinfo;
2135 mirror_info_t *newmirrors;
2136 conf_t *conf = mddev_to_conf(mddev);
NeilBrown63c70c42006-03-27 01:18:13 -08002137 int cnt, raid_disks;
NeilBrownc04be0a2006-10-03 01:15:53 -07002138 unsigned long flags;
Dan Williamsb5470dc2008-06-27 21:44:04 -07002139 int d, d2, err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
NeilBrown63c70c42006-03-27 01:18:13 -08002141 /* Cannot change chunk_size, layout, or level */
2142 if (mddev->chunk_size != mddev->new_chunk ||
2143 mddev->layout != mddev->new_layout ||
2144 mddev->level != mddev->new_level) {
2145 mddev->new_chunk = mddev->chunk_size;
2146 mddev->new_layout = mddev->layout;
2147 mddev->new_level = mddev->level;
2148 return -EINVAL;
2149 }
2150
Dan Williamsb5470dc2008-06-27 21:44:04 -07002151 err = md_allow_write(mddev);
2152 if (err)
2153 return err;
NeilBrown2a2275d2007-01-26 00:57:11 -08002154
NeilBrown63c70c42006-03-27 01:18:13 -08002155 raid_disks = mddev->raid_disks + mddev->delta_disks;
2156
NeilBrown6ea9c072005-06-21 17:17:09 -07002157 if (raid_disks < conf->raid_disks) {
2158 cnt=0;
2159 for (d= 0; d < conf->raid_disks; d++)
2160 if (conf->mirrors[d].rdev)
2161 cnt++;
2162 if (cnt > raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 return -EBUSY;
NeilBrown6ea9c072005-06-21 17:17:09 -07002164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2167 if (!newpoolinfo)
2168 return -ENOMEM;
2169 newpoolinfo->mddev = mddev;
2170 newpoolinfo->raid_disks = raid_disks;
2171
2172 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2173 r1bio_pool_free, newpoolinfo);
2174 if (!newpool) {
2175 kfree(newpoolinfo);
2176 return -ENOMEM;
2177 }
NeilBrown9ffae0c2006-01-06 00:20:32 -08002178 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 if (!newmirrors) {
2180 kfree(newpoolinfo);
2181 mempool_destroy(newpool);
2182 return -ENOMEM;
2183 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
NeilBrown17999be2006-01-06 00:20:12 -08002185 raise_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
2187 /* ok, everything is stopped */
2188 oldpool = conf->r1bio_pool;
2189 conf->r1bio_pool = newpool;
NeilBrown6ea9c072005-06-21 17:17:09 -07002190
NeilBrowna88aa782007-08-22 14:01:53 -07002191 for (d = d2 = 0; d < conf->raid_disks; d++) {
2192 mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2193 if (rdev && rdev->raid_disk != d2) {
2194 char nm[20];
2195 sprintf(nm, "rd%d", rdev->raid_disk);
2196 sysfs_remove_link(&mddev->kobj, nm);
2197 rdev->raid_disk = d2;
2198 sprintf(nm, "rd%d", rdev->raid_disk);
2199 sysfs_remove_link(&mddev->kobj, nm);
2200 if (sysfs_create_link(&mddev->kobj,
2201 &rdev->kobj, nm))
2202 printk(KERN_WARNING
2203 "md/raid1: cannot register "
2204 "%s for %s\n",
2205 nm, mdname(mddev));
NeilBrown6ea9c072005-06-21 17:17:09 -07002206 }
NeilBrowna88aa782007-08-22 14:01:53 -07002207 if (rdev)
2208 newmirrors[d2++].rdev = rdev;
2209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 kfree(conf->mirrors);
2211 conf->mirrors = newmirrors;
2212 kfree(conf->poolinfo);
2213 conf->poolinfo = newpoolinfo;
2214
NeilBrownc04be0a2006-10-03 01:15:53 -07002215 spin_lock_irqsave(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 mddev->degraded += (raid_disks - conf->raid_disks);
NeilBrownc04be0a2006-10-03 01:15:53 -07002217 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 conf->raid_disks = mddev->raid_disks = raid_disks;
NeilBrown63c70c42006-03-27 01:18:13 -08002219 mddev->delta_disks = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
NeilBrown6ea9c072005-06-21 17:17:09 -07002221 conf->last_used = 0; /* just make sure it is in-range */
NeilBrown17999be2006-01-06 00:20:12 -08002222 lower_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
2224 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2225 md_wakeup_thread(mddev->thread);
2226
2227 mempool_destroy(oldpool);
2228 return 0;
2229}
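/*
 * Illustrative sketch, not part of the driver: the "packing" step in
 * raid1_reshape() above simply compacts the present devices into the lowest
 * slots so that any holes end up at the high raid_disk numbers.  Taken in
 * isolation, the idea is no more than:
 *
 *	#include <stddef.h>
 *
 *	// Move non-NULL entries to the front, preserving their order, and
 *	// return how many slots are occupied afterwards.
 *	static int pack_slots(void **slot, int nslots)
 *	{
 *		int d, d2 = 0;
 *
 *		for (d = 0; d < nslots; d++)
 *			if (slot[d])
 *				slot[d2++] = slot[d];
 *		for (d = d2; d < nslots; d++)
 *			slot[d] = NULL;
 *		return d2;
 *	}
 */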
2230
NeilBrown500af872005-09-09 16:23:58 -07002231static void raid1_quiesce(mddev_t *mddev, int state)
NeilBrown36fa3062005-09-09 16:23:45 -07002232{
2233 conf_t *conf = mddev_to_conf(mddev);
2234
2235 switch(state) {
NeilBrown9e6603d2005-09-09 16:23:48 -07002236 case 1:
NeilBrown17999be2006-01-06 00:20:12 -08002237 raise_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002238 break;
NeilBrown9e6603d2005-09-09 16:23:48 -07002239 case 0:
NeilBrown17999be2006-01-06 00:20:12 -08002240 lower_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002241 break;
2242 }
NeilBrown36fa3062005-09-09 16:23:45 -07002243}
2244
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
NeilBrown2604b702006-01-06 00:20:36 -08002246static struct mdk_personality raid1_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247{
2248 .name = "raid1",
NeilBrown2604b702006-01-06 00:20:36 -08002249 .level = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 .owner = THIS_MODULE,
2251 .make_request = make_request,
2252 .run = run,
2253 .stop = stop,
2254 .status = status,
2255 .error_handler = error,
2256 .hot_add_disk = raid1_add_disk,
2257 .hot_remove_disk= raid1_remove_disk,
2258 .spare_active = raid1_spare_active,
2259 .sync_request = sync_request,
2260 .resize = raid1_resize,
NeilBrown63c70c42006-03-27 01:18:13 -08002261 .check_reshape = raid1_reshape,
NeilBrown36fa3062005-09-09 16:23:45 -07002262 .quiesce = raid1_quiesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263};
2264
2265static int __init raid_init(void)
2266{
NeilBrown2604b702006-01-06 00:20:36 -08002267 return register_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268}
2269
2270static void raid_exit(void)
2271{
NeilBrown2604b702006-01-06 00:20:36 -08002272 unregister_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273}
2274
2275module_init(raid_init);
2276module_exit(raid_exit);
2277MODULE_LICENSE("GPL");
2278MODULE_ALIAS("md-personality-3"); /* RAID1 */
NeilBrownd9d166c2006-01-06 00:20:51 -08002279MODULE_ALIAS("md-raid1");
NeilBrown2604b702006-01-06 00:20:36 -08002280MODULE_ALIAS("md-level-1");