/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/delay.h>
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

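/*
 * Resync buffer geometry: each resync buffer covers RESYNC_BLOCK_SIZE
 * (64KB) of the array, and RESYNC_PAGES is the number of data pages
 * attached to each resync bio (see r1buf_pool_alloc below).
 */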
#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If this is not a user-requested check/repair, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

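/*
 * Drop the r1bio's reference on each per-mirror bio. IO_BLOCKED is a
 * sentinel value, not a real bio, so it must not be bio_put().
 */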
static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

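/*
 * Hand a failed r1bio to raid1d: queue it on conf->retry_list and wake
 * the management thread.
 */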
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_write_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}


static int flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 * We return 1 if any requests were actually submitted.
	 */
	int rv = 0;

	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
		rv = 1;
	} else
		spin_unlock_irq(&conf->device_lock);
	return rv;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    ({ flush_pending_writes(conf);
			       raid1_unplug(conf->mddev->queue); }));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
				      GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
			kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}

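/*
 * Entry point for all normal IO to the array. Reads are dispatched to a
 * single mirror chosen by read_balance(); writes are cloned to every
 * active mirror and queued on pending_bio_list for raid1d to submit
 * once the bitmap has been updated.
 */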
static int make_request(struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const int do_sync = bio_sync(bio);
	int cpu, do_barriers;
	mdk_rdev_t *blocked_rdev;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);

		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers | do_sync;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged. This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* In case raid1d snuck into freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		mddev->recovery_disabled = 1;
		return;
	}
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
		"raid1: Operation continuing on %d devices.\n",
		bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}

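/*
 * Hot-add a spare: place @rdev in the first free mirror slot (or in the
 * slot named by rdev->raid_disk if that was preset) and schedule a full
 * resync unless the device was recently a member of this array.
 */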
static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	mirror_info_t *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	for (mirror = first; mirror <= last; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this device was recently part of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return err;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
}

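/*
 * Complete the write phase of a resync request. For a user-requested
 * check/repair, compare the copies first and only rewrite mirrors that
 * differ; if the read failed, retry it synchronously from the other
 * mirrors before scheduling the writes.
 */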
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];


	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices. If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT-9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];

				if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
					for (j = vcnt; j-- ; ) {
						struct page *p, *s;
						p = pbio->bi_io_vec[j].bv_page;
						s = sbio->bi_io_vec[j].bv_page;
						if (memcmp(page_address(p),
							   page_address(s),
							   PAGE_SIZE))
							break;
					}
				} else
					j = 0;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
					      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					int size;
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
					size = sbio->bi_size;
					for (j = 0; j < vcnt ; j++) {
						struct bio_vec *bi;
						bi = &sbio->bi_io_vec[j];
						bi->bv_offset = 0;
						if (size > PAGE_SIZE)
							bi->bv_len = PAGE_SIZE;
						else
							bi->bv_len = size;
						size -= PAGE_SIZE;
						memcpy(page_address(bi->bv_page),
						       page_address(pbio->bi_io_vec[j].bv_page),
						       PAGE_SIZE);
					}

				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors. Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					/* No rcu protection needed here; devices
					 * can only be removed when no resync is
					 * active, and resync is currently active
					 */
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx ++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

1459/*
1460 * This is a kernel thread which:
1461 *
1462 * 1. Retries failed read operations on working mirrors.
 1463 * 2. Updates the raid superblock when problems are encountered.
 1464 * 3. Performs writes following reads for array synchronising.
1465 */
1466
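/*
 * Try to repair a read error synchronously: find an In_sync device
 * that can supply the data, write it back over the bad range on the
 * other in-sync devices (including the one that originally failed),
 * then re-read to verify the repair. A device that cannot be written
 * or re-read is failed with md_error().
 */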
NeilBrown867868f2006-10-03 01:15:51 -07001467static void fix_read_error(conf_t *conf, int read_disk,
1468 sector_t sect, int sectors)
1469{
1470 mddev_t *mddev = conf->mddev;
1471 while(sectors) {
1472 int s = sectors;
1473 int d = read_disk;
1474 int success = 0;
1475 int start;
1476 mdk_rdev_t *rdev;
1477
1478 if (s > (PAGE_SIZE>>9))
1479 s = PAGE_SIZE >> 9;
1480
1481 do {
1482 /* Note: no rcu protection needed here
1483 * as this is synchronous in the raid1d thread
1484 * which is the thread that might remove
1485 * a device. If raid1d ever becomes multi-threaded....
1486 */
1487 rdev = conf->mirrors[d].rdev;
1488 if (rdev &&
1489 test_bit(In_sync, &rdev->flags) &&
1490 sync_page_io(rdev->bdev,
1491 sect + rdev->data_offset,
1492 s<<9,
1493 conf->tmppage, READ))
1494 success = 1;
1495 else {
1496 d++;
1497 if (d == conf->raid_disks)
1498 d = 0;
1499 }
1500 } while (!success && d != read_disk);
1501
1502 if (!success) {
1503 /* Cannot read from anywhere -- bye bye array */
1504 md_error(mddev, conf->mirrors[read_disk].rdev);
1505 break;
1506 }
1507 /* write it back and re-read */
1508 start = d;
1509 while (d != read_disk) {
1510 if (d==0)
1511 d = conf->raid_disks;
1512 d--;
1513 rdev = conf->mirrors[d].rdev;
1514 if (rdev &&
1515 test_bit(In_sync, &rdev->flags)) {
1516 if (sync_page_io(rdev->bdev,
1517 sect + rdev->data_offset,
1518 s<<9, conf->tmppage, WRITE)
1519 == 0)
1520 /* Well, this device is dead */
1521 md_error(mddev, rdev);
1522 }
1523 }
1524 d = start;
1525 while (d != read_disk) {
1526 char b[BDEVNAME_SIZE];
1527 if (d==0)
1528 d = conf->raid_disks;
1529 d--;
1530 rdev = conf->mirrors[d].rdev;
1531 if (rdev &&
1532 test_bit(In_sync, &rdev->flags)) {
1533 if (sync_page_io(rdev->bdev,
1534 sect + rdev->data_offset,
1535 s<<9, conf->tmppage, READ)
1536 == 0)
1537 /* Well, this device is dead */
1538 md_error(mddev, rdev);
1539 else {
1540 atomic_add(s, &rdev->corrected_errors);
1541 printk(KERN_INFO
1542 "raid1:%s: read error corrected "
1543 "(%d sectors at %llu on %s)\n",
1544 mdname(mddev), s,
Randy Dunlap969b7552006-10-28 10:38:32 -07001545 (unsigned long long)(sect +
1546 rdev->data_offset),
NeilBrown867868f2006-10-03 01:15:51 -07001547 bdevname(rdev->bdev, b));
1548 }
1549 }
1550 }
1551 sectors -= s;
1552 sect += s;
1553 }
1554}
1555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556static void raid1d(mddev_t *mddev)
1557{
1558 r1bio_t *r1_bio;
1559 struct bio *bio;
1560 unsigned long flags;
1561 conf_t *conf = mddev_to_conf(mddev);
1562 struct list_head *head = &conf->retry_list;
1563 int unplug=0;
1564 mdk_rdev_t *rdev;
1565
1566 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
1568 for (;;) {
1569 char b[BDEVNAME_SIZE];
NeilBrowna35e63e2008-03-04 14:29:29 -08001570
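		/* flush any writes that earlier processing queued (e.g.
		 * while awaiting bitmap updates); a nonzero return means
		 * requests were submitted, so the queues need unplugging */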
1571 unplug += flush_pending_writes(conf);
1572
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrowna35e63e2008-03-04 14:29:29 -08001574 if (list_empty(head)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001575 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 break;
NeilBrowna35e63e2008-03-04 14:29:29 -08001577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1579 list_del(head->prev);
NeilBrownddaf22a2006-01-06 00:20:19 -08001580 conf->nr_queued--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 spin_unlock_irqrestore(&conf->device_lock, flags);
1582
1583 mddev = r1_bio->mddev;
1584 conf = mddev_to_conf(mddev);
1585 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1586 sync_request_write(mddev, r1_bio);
1587 unplug = 1;
NeilBrowna9701a32005-11-08 21:39:34 -08001588 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
1589 /* some requests in the r1bio were BIO_RW_BARRIER
NeilBrownbea27712006-05-01 12:15:46 -07001590 * requests which failed with -EOPNOTSUPP. Hohumm..
NeilBrowna9701a32005-11-08 21:39:34 -08001591 * Better resubmit without the barrier.
1592 * We know which devices to resubmit for, because
1593 * all others have had their bios[] entry cleared.
NeilBrown5e7dd2a2006-05-01 12:15:47 -07001594 * We already have a nr_pending reference on these rdevs.
NeilBrowna9701a32005-11-08 21:39:34 -08001595 */
1596 int i;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001597 const int do_sync = bio_sync(r1_bio->master_bio);
NeilBrowna9701a32005-11-08 21:39:34 -08001598 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1599 clear_bit(R1BIO_Barrier, &r1_bio->state);
1600 for (i=0; i < conf->raid_disks; i++)
NeilBrown2f889122006-03-27 01:18:19 -08001601 if (r1_bio->bios[i])
1602 atomic_inc(&r1_bio->remaining);
1603 for (i=0; i < conf->raid_disks; i++)
NeilBrowna9701a32005-11-08 21:39:34 -08001604 if (r1_bio->bios[i]) {
1605 struct bio_vec *bvec;
1606 int j;
1607
1608 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1609 /* copy pages from the failed bio, as
1610 * this might be a write-behind device */
1611 __bio_for_each_segment(bvec, bio, j, 0)
1612 bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
1613 bio_put(r1_bio->bios[i]);
1614 bio->bi_sector = r1_bio->sector +
1615 conf->mirrors[i].rdev->data_offset;
1616 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1617 bio->bi_end_io = raid1_end_write_request;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001618 bio->bi_rw = WRITE | do_sync;
NeilBrowna9701a32005-11-08 21:39:34 -08001619 bio->bi_private = r1_bio;
1620 r1_bio->bios[i] = bio;
1621 generic_make_request(bio);
1622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 } else {
1624 int disk;
NeilBrownddaf22a2006-01-06 00:20:19 -08001625
1626 /* we got a read error. Maybe the drive is bad. Maybe just
1627 * the block and we can fix it.
1628 * We freeze all other IO, and try reading the block from
 1629 * other devices. When we find one, we re-write the block
 1630 * and re-read to check that this fixes the read error.
1631 * This is all done synchronously while the array is
1632 * frozen
1633 */
NeilBrown867868f2006-10-03 01:15:51 -07001634 if (mddev->ro == 0) {
1635 freeze_array(conf);
1636 fix_read_error(conf, r1_bio->read_disk,
1637 r1_bio->sector,
1638 r1_bio->sectors);
1639 unfreeze_array(conf);
NeilBrownddaf22a2006-01-06 00:20:19 -08001640 }
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 bio = r1_bio->bios[r1_bio->read_disk];
1643 if ((disk=read_balance(conf, r1_bio)) == -1) {
1644 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1645 " read error for block %llu\n",
1646 bdevname(bio->bi_bdev,b),
1647 (unsigned long long)r1_bio->sector);
1648 raid_end_bio_io(r1_bio);
1649 } else {
Lars Ellenberge3881a62007-01-10 23:15:37 -08001650 const int do_sync = bio_sync(r1_bio->master_bio);
NeilBrowncf30a472006-01-06 00:20:23 -08001651 r1_bio->bios[r1_bio->read_disk] =
1652 mddev->ro ? IO_BLOCKED : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 r1_bio->read_disk = disk;
1654 bio_put(bio);
1655 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1656 r1_bio->bios[r1_bio->read_disk] = bio;
1657 rdev = conf->mirrors[disk].rdev;
1658 if (printk_ratelimit())
1659 printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
1660 " another mirror\n",
1661 bdevname(rdev->bdev,b),
1662 (unsigned long long)r1_bio->sector);
1663 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1664 bio->bi_bdev = rdev->bdev;
1665 bio->bi_end_io = raid1_end_read_request;
Lars Ellenberge3881a62007-01-10 23:15:37 -08001666 bio->bi_rw = READ | do_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 bio->bi_private = r1_bio;
1668 unplug = 1;
1669 generic_make_request(bio);
1670 }
1671 }
1672 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 if (unplug)
1674 unplug_slaves(mddev);
1675}
1676
1677
1678static int init_resync(conf_t *conf)
1679{
1680 int buffs;
1681
1682 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
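	/* size the pool to hold enough resync buffers for one full
	 * resync window */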
Eric Sesterhenn9e77c482006-04-01 01:08:49 +02001683 BUG_ON(conf->r1buf_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1685 conf->poolinfo);
1686 if (!conf->r1buf_pool)
1687 return -ENOMEM;
1688 conf->next_resync = 0;
1689 return 0;
1690}
1691
1692/*
1693 * perform a "sync" on one "block"
1694 *
1695 * We need to make sure that no normal I/O request - particularly write
1696 * requests - conflict with active sync requests.
1697 *
1698 * This is achieved by tracking pending requests and a 'barrier' concept
1699 * that can be installed to exclude normal IO requests.
1700 */
1701
NeilBrown57afd892005-06-21 17:17:13 -07001702static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703{
1704 conf_t *conf = mddev_to_conf(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 r1bio_t *r1_bio;
1706 struct bio *bio;
1707 sector_t max_sector, nr_sectors;
NeilBrown3e198f72006-01-06 00:20:21 -08001708 int disk = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 int i;
NeilBrown3e198f72006-01-06 00:20:21 -08001710 int wonly = -1;
1711 int write_targets = 0, read_targets = 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001712 int sync_blocks;
NeilBrowne3b97032005-08-04 12:53:34 -07001713 int still_degraded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
1715 if (!conf->r1buf_pool)
NeilBrown191ea9b2005-06-21 17:17:23 -07001716 {
1717/*
1718 printk("sync start - bitmap %p\n", mddev->bitmap);
1719*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07001721 return 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724 max_sector = mddev->size << 1;
1725 if (sector_nr >= max_sector) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001726 /* If we aborted, we need to abort the
1727 * sync on the 'current' bitmap chunk (there will
 1728 * only be one in a raid1 resync).
 1729 * We can find the current address in mddev->curr_resync.
1730 */
NeilBrown6a806c52005-07-15 03:56:35 -07001731 if (mddev->curr_resync < max_sector) /* aborted */
1732 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
NeilBrown191ea9b2005-06-21 17:17:23 -07001733 &sync_blocks, 1);
NeilBrown6a806c52005-07-15 03:56:35 -07001734 else /* completed sync */
NeilBrown191ea9b2005-06-21 17:17:23 -07001735 conf->fullsync = 0;
NeilBrown6a806c52005-07-15 03:56:35 -07001736
1737 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 close_sync(conf);
1739 return 0;
1740 }
1741
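	/* no bitmap, array known to be in sync, and no user-requested
	 * check/repair: a plain resync has nothing to do, so report the
	 * whole remaining range as done */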
NeilBrown07d84d102006-06-26 00:27:56 -07001742 if (mddev->bitmap == NULL &&
1743 mddev->recovery_cp == MaxSector &&
NeilBrown6394cca2006-08-27 01:23:50 -07001744 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
NeilBrown07d84d102006-06-26 00:27:56 -07001745 conf->fullsync == 0) {
1746 *skipped = 1;
1747 return max_sector - sector_nr;
1748 }
NeilBrown6394cca2006-08-27 01:23:50 -07001749 /* before building a request, check if we can skip these blocks.
 1750 * This call to bitmap_start_sync doesn't actually record anything.
1751 */
NeilBrowne3b97032005-08-04 12:53:34 -07001752 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
NeilBrowne5de4852005-11-08 21:39:38 -08001753 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001754 /* We can skip this block, and probably several more */
1755 *skipped = 1;
1756 return sync_blocks;
1757 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 /*
NeilBrown17999be2006-01-06 00:20:12 -08001759 * If there is non-resync activity waiting for a turn,
1760 * and resync is going fast enough,
 1761 * then let it through before starting on this new sync request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 */
NeilBrown17999be2006-01-06 00:20:12 -08001763 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 msleep_interruptible(1000);
NeilBrown17999be2006-01-06 00:20:12 -08001765
NeilBrownb47490c2008-02-06 01:39:50 -08001766 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
NeilBrown17999be2006-01-06 00:20:12 -08001767 raise_barrier(conf);
1768
1769 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
NeilBrown3e198f72006-01-06 00:20:21 -08001772 rcu_read_lock();
1773 /*
 1774 * If we get a correctable read error during resync or recovery,
1775 * we might want to read from a different device. So we
1776 * flag all drives that could conceivably be read from for READ,
1777 * and any others (which will be non-In_sync devices) for WRITE.
1778 * If a read fails, we try reading from something else for which READ
1779 * is OK.
1780 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 r1_bio->mddev = mddev;
1783 r1_bio->sector = sector_nr;
NeilBrown191ea9b2005-06-21 17:17:23 -07001784 r1_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 set_bit(R1BIO_IsSync, &r1_bio->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
1787 for (i=0; i < conf->raid_disks; i++) {
NeilBrown3e198f72006-01-06 00:20:21 -08001788 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 bio = r1_bio->bios[i];
1790
1791 /* take from bio_init */
1792 bio->bi_next = NULL;
1793 bio->bi_flags |= 1 << BIO_UPTODATE;
NeilBrown802ba062006-12-13 00:34:13 -08001794 bio->bi_rw = READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 bio->bi_vcnt = 0;
1796 bio->bi_idx = 0;
1797 bio->bi_phys_segments = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 bio->bi_size = 0;
1799 bio->bi_end_io = NULL;
1800 bio->bi_private = NULL;
1801
NeilBrown3e198f72006-01-06 00:20:21 -08001802 rdev = rcu_dereference(conf->mirrors[i].rdev);
1803 if (rdev == NULL ||
1804 test_bit(Faulty, &rdev->flags)) {
NeilBrowne3b97032005-08-04 12:53:34 -07001805 still_degraded = 1;
1806 continue;
NeilBrown3e198f72006-01-06 00:20:21 -08001807 } else if (!test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 bio->bi_rw = WRITE;
1809 bio->bi_end_io = end_sync_write;
1810 write_targets ++;
NeilBrown3e198f72006-01-06 00:20:21 -08001811 } else {
1812 /* may need to read from here */
1813 bio->bi_rw = READ;
1814 bio->bi_end_io = end_sync_read;
1815 if (test_bit(WriteMostly, &rdev->flags)) {
1816 if (wonly < 0)
1817 wonly = i;
1818 } else {
1819 if (disk < 0)
1820 disk = i;
1821 }
1822 read_targets++;
1823 }
1824 atomic_inc(&rdev->nr_pending);
1825 bio->bi_sector = sector_nr + rdev->data_offset;
1826 bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 bio->bi_private = r1_bio;
1828 }
NeilBrown3e198f72006-01-06 00:20:21 -08001829 rcu_read_unlock();
1830 if (disk < 0)
1831 disk = wonly;
1832 r1_bio->read_disk = disk;
NeilBrown191ea9b2005-06-21 17:17:23 -07001833
NeilBrown3e198f72006-01-06 00:20:21 -08001834 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1835 /* extra read targets are also write targets */
1836 write_targets += read_targets-1;
1837
1838 if (write_targets == 0 || read_targets == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 /* There is nowhere to write, so all non-sync
1840 * drives must be failed - so we are finished
1841 */
NeilBrown57afd892005-06-21 17:17:13 -07001842 sector_t rv = max_sector - sector_nr;
1843 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 put_buf(r1_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 return rv;
1846 }
1847
NeilBrownc6207272008-02-06 01:39:52 -08001848 if (max_sector > mddev->resync_max)
1849 max_sector = mddev->resync_max; /* Don't do IO beyond here */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 nr_sectors = 0;
NeilBrown289e99e2005-06-21 17:17:24 -07001851 sync_blocks = 0;
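	/* assemble the request page by page, adding each page to every
	 * active bio; if one bio fills early (bio_add_page fails), back
	 * the last page out of all of them and submit what fits (see
	 * bio_full below) */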
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 do {
1853 struct page *page;
1854 int len = PAGE_SIZE;
1855 if (sector_nr + (len>>9) > max_sector)
1856 len = (max_sector - sector_nr) << 9;
1857 if (len == 0)
1858 break;
NeilBrown6a806c52005-07-15 03:56:35 -07001859 if (sync_blocks == 0) {
1860 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
NeilBrowne5de4852005-11-08 21:39:38 -08001861 &sync_blocks, still_degraded) &&
1862 !conf->fullsync &&
1863 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
NeilBrown6a806c52005-07-15 03:56:35 -07001864 break;
Eric Sesterhenn9e77c482006-04-01 01:08:49 +02001865 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
NeilBrown6a806c52005-07-15 03:56:35 -07001866 if (len > (sync_blocks<<9))
1867 len = sync_blocks<<9;
NeilBrownab7a30c2005-06-21 17:17:23 -07001868 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001869
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 for (i=0 ; i < conf->raid_disks; i++) {
1871 bio = r1_bio->bios[i];
1872 if (bio->bi_end_io) {
NeilBrownd11c1712006-01-06 00:20:26 -08001873 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 if (bio_add_page(bio, page, len, 0) == 0) {
1875 /* stop here */
NeilBrownd11c1712006-01-06 00:20:26 -08001876 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 while (i > 0) {
1878 i--;
1879 bio = r1_bio->bios[i];
NeilBrown6a806c52005-07-15 03:56:35 -07001880 if (bio->bi_end_io==NULL)
1881 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 /* remove last page from this bio */
1883 bio->bi_vcnt--;
1884 bio->bi_size -= len;
1885 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
1886 }
1887 goto bio_full;
1888 }
1889 }
1890 }
1891 nr_sectors += len>>9;
1892 sector_nr += len>>9;
NeilBrown191ea9b2005-06-21 17:17:23 -07001893 sync_blocks -= (len>>9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1895 bio_full:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 r1_bio->sectors = nr_sectors;
1897
NeilBrownd11c1712006-01-06 00:20:26 -08001898 /* For a user-requested sync, we read all readable devices and do a
1899 * compare
1900 */
1901 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1902 atomic_set(&r1_bio->remaining, read_targets);
1903 for (i=0; i<conf->raid_disks; i++) {
1904 bio = r1_bio->bios[i];
1905 if (bio->bi_end_io == end_sync_read) {
NeilBrownddac7c72006-08-31 21:27:36 -07001906 md_sync_acct(bio->bi_bdev, nr_sectors);
NeilBrownd11c1712006-01-06 00:20:26 -08001907 generic_make_request(bio);
1908 }
1909 }
1910 } else {
1911 atomic_set(&r1_bio->remaining, 1);
1912 bio = r1_bio->bios[r1_bio->read_disk];
NeilBrownddac7c72006-08-31 21:27:36 -07001913 md_sync_acct(bio->bi_bdev, nr_sectors);
NeilBrownd11c1712006-01-06 00:20:26 -08001914 generic_make_request(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
NeilBrownd11c1712006-01-06 00:20:26 -08001916 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return nr_sectors;
1918}
1919
1920static int run(mddev_t *mddev)
1921{
1922 conf_t *conf;
1923 int i, j, disk_idx;
1924 mirror_info_t *disk;
1925 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927 if (mddev->level != 1) {
1928 printk("raid1: %s: raid level not set to mirroring (%d)\n",
1929 mdname(mddev), mddev->level);
1930 goto out;
1931 }
NeilBrownf6705572006-03-27 01:18:11 -08001932 if (mddev->reshape_position != MaxSector) {
1933 printk("raid1: %s: reshape_position set but not supported\n",
1934 mdname(mddev));
1935 goto out;
1936 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 /*
1938 * copy the already verified devices into our private RAID1
1939 * bookkeeping area. [whatever we allocate in run(),
1940 * should be freed in stop()]
1941 */
NeilBrown9ffae0c2006-01-06 00:20:32 -08001942 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 mddev->private = conf;
1944 if (!conf)
1945 goto out_no_mem;
1946
NeilBrown9ffae0c2006-01-06 00:20:32 -08001947 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 GFP_KERNEL);
1949 if (!conf->mirrors)
1950 goto out_no_mem;
1951
NeilBrownddaf22a2006-01-06 00:20:19 -08001952 conf->tmppage = alloc_page(GFP_KERNEL);
1953 if (!conf->tmppage)
1954 goto out_no_mem;
1955
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1957 if (!conf->poolinfo)
1958 goto out_no_mem;
1959 conf->poolinfo->mddev = mddev;
1960 conf->poolinfo->raid_disks = mddev->raid_disks;
1961 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1962 r1bio_pool_free,
1963 conf->poolinfo);
1964 if (!conf->r1bio_pool)
1965 goto out_no_mem;
1966
Neil Browne7e72bf2008-05-14 16:05:54 -07001967 spin_lock_init(&conf->device_lock);
1968 mddev->queue->queue_lock = &conf->device_lock;
1969
Cheng Renquan159ec1f2009-01-09 08:31:08 +11001970 list_for_each_entry(rdev, &mddev->disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 disk_idx = rdev->raid_disk;
1972 if (disk_idx >= mddev->raid_disks
1973 || disk_idx < 0)
1974 continue;
1975 disk = conf->mirrors + disk_idx;
1976
1977 disk->rdev = rdev;
1978
1979 blk_queue_stack_limits(mddev->queue,
1980 rdev->bdev->bd_disk->queue);
1981 /* as we don't honour merge_bvec_fn, we must never risk
1982 * violating it, so limit ->max_sector to one PAGE, as
1983 * a one page request is never in violation.
1984 */
1985 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1986 mddev->queue->max_sectors > (PAGE_SIZE>>9))
1987 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1988
1989 disk->head_position = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 }
1991 conf->raid_disks = mddev->raid_disks;
1992 conf->mddev = mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 INIT_LIST_HEAD(&conf->retry_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
1995 spin_lock_init(&conf->resync_lock);
NeilBrown17999be2006-01-06 00:20:12 -08001996 init_waitqueue_head(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
NeilBrown191ea9b2005-06-21 17:17:23 -07001998 bio_list_init(&conf->pending_bio_list);
1999 bio_list_init(&conf->flushing_bio_list);
2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
2002 mddev->degraded = 0;
2003 for (i = 0; i < conf->raid_disks; i++) {
2004
2005 disk = conf->mirrors + i;
2006
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002007 if (!disk->rdev ||
2008 !test_bit(In_sync, &disk->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 disk->head_position = 0;
2010 mddev->degraded++;
NeilBrown918f0232007-08-22 14:01:52 -07002011 if (disk->rdev)
2012 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 }
2014 }
NeilBrown11ce99e2006-10-03 01:15:52 -07002015 if (mddev->degraded == conf->raid_disks) {
2016 printk(KERN_ERR "raid1: no operational mirrors for %s\n",
2017 mdname(mddev));
2018 goto out_free_conf;
2019 }
2020 if (conf->raid_disks - mddev->degraded == 1)
2021 mddev->recovery_cp = MaxSector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022
2023 /*
2024 * find the first working one and use it as a starting point
2025 * to read balancing.
2026 */
2027 for (j = 0; j < conf->raid_disks &&
2028 (!conf->mirrors[j].rdev ||
NeilBrownb2d444d2005-11-08 21:39:31 -08002029 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 /* nothing */;
2031 conf->last_used = j;
2032
2033
NeilBrown191ea9b2005-06-21 17:17:23 -07002034 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
2035 if (!mddev->thread) {
2036 printk(KERN_ERR
2037 "raid1: couldn't allocate thread for %s\n",
2038 mdname(mddev));
2039 goto out_free_conf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 }
NeilBrown191ea9b2005-06-21 17:17:23 -07002041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 printk(KERN_INFO
2043 "raid1: raid set %s active with %d out of %d mirrors\n",
2044 mdname(mddev), mddev->raid_disks - mddev->degraded,
2045 mddev->raid_disks);
2046 /*
2047 * Ok, everything is just fine now
2048 */
Andre Nollf233ea52008-07-21 17:05:22 +10002049 mddev->array_sectors = mddev->size * 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
NeilBrown7a5febe2005-05-16 21:53:16 -07002051 mddev->queue->unplug_fn = raid1_unplug;
NeilBrown0d129222006-10-03 01:15:54 -07002052 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2053 mddev->queue->backing_dev_info.congested_data = mddev;
NeilBrown7a5febe2005-05-16 21:53:16 -07002054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 return 0;
2056
2057out_no_mem:
2058 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
2059 mdname(mddev));
2060
2061out_free_conf:
2062 if (conf) {
2063 if (conf->r1bio_pool)
2064 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002065 kfree(conf->mirrors);
NeilBrown1345b1d2006-01-06 00:20:40 -08002066 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002067 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 kfree(conf);
2069 mddev->private = NULL;
2070 }
2071out:
2072 return -EIO;
2073}
2074
2075static int stop(mddev_t *mddev)
2076{
2077 conf_t *conf = mddev_to_conf(mddev);
NeilBrown4b6d2872005-09-09 16:23:47 -07002078 struct bitmap *bitmap = mddev->bitmap;
2079 int behind_wait = 0;
2080
2081 /* wait for behind writes to complete */
2082 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2083 behind_wait++;
2084 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
2085 set_current_state(TASK_UNINTERRUPTIBLE);
2086 schedule_timeout(HZ); /* wait a second */
2087 /* need to kick something here to make sure I/O goes? */
2088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090 md_unregister_thread(mddev->thread);
2091 mddev->thread = NULL;
2092 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2093 if (conf->r1bio_pool)
2094 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07002095 kfree(conf->mirrors);
2096 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 kfree(conf);
2098 mddev->private = NULL;
2099 return 0;
2100}
2101
2102static int raid1_resize(mddev_t *mddev, sector_t sectors)
2103{
2104 /* no resync is happening, and there is enough space
2105 * on all devices, so we can resize.
2106 * We need to make sure resync covers any new space.
2107 * If the array is shrinking we should possibly wait until
2108 * any io in the removed space completes, but it hardly seems
2109 * worth it.
2110 */
Andre Nollf233ea52008-07-21 17:05:22 +10002111 mddev->array_sectors = sectors;
2112 set_capacity(mddev->gendisk, mddev->array_sectors);
Linus Torvalds44ce6292007-05-09 18:51:36 -07002113 mddev->changed = 1;
Andre Nollf233ea52008-07-21 17:05:22 +10002114 if (mddev->array_sectors / 2 > mddev->size &&
2115 mddev->recovery_cp == MaxSector) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 mddev->recovery_cp = mddev->size << 1;
2117 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2118 }
Andre Nollf233ea52008-07-21 17:05:22 +10002119 mddev->size = mddev->array_sectors / 2;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07002120 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 return 0;
2122}
2123
NeilBrown63c70c42006-03-27 01:18:13 -08002124static int raid1_reshape(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125{
2126 /* We need to:
2127 * 1/ resize the r1bio_pool
2128 * 2/ resize conf->mirrors
2129 *
2130 * We allocate a new r1bio_pool if we can.
2131 * Then raise a device barrier and wait until all IO stops.
2132 * Then resize conf->mirrors and swap in the new r1bio pool.
NeilBrown6ea9c072005-06-21 17:17:09 -07002133 *
2134 * At the same time, we "pack" the devices so that all the missing
2135 * devices have the higher raid_disk numbers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 */
2137 mempool_t *newpool, *oldpool;
2138 struct pool_info *newpoolinfo;
2139 mirror_info_t *newmirrors;
2140 conf_t *conf = mddev_to_conf(mddev);
NeilBrown63c70c42006-03-27 01:18:13 -08002141 int cnt, raid_disks;
NeilBrownc04be0a2006-10-03 01:15:53 -07002142 unsigned long flags;
Dan Williamsb5470dc2008-06-27 21:44:04 -07002143 int d, d2, err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
NeilBrown63c70c42006-03-27 01:18:13 -08002145 /* Cannot change chunk_size, layout, or level */
2146 if (mddev->chunk_size != mddev->new_chunk ||
2147 mddev->layout != mddev->new_layout ||
2148 mddev->level != mddev->new_level) {
2149 mddev->new_chunk = mddev->chunk_size;
2150 mddev->new_layout = mddev->layout;
2151 mddev->new_level = mddev->level;
2152 return -EINVAL;
2153 }
2154
Dan Williamsb5470dc2008-06-27 21:44:04 -07002155 err = md_allow_write(mddev);
2156 if (err)
2157 return err;
NeilBrown2a2275d2007-01-26 00:57:11 -08002158
NeilBrown63c70c42006-03-27 01:18:13 -08002159 raid_disks = mddev->raid_disks + mddev->delta_disks;
2160
NeilBrown6ea9c072005-06-21 17:17:09 -07002161 if (raid_disks < conf->raid_disks) {
2162 cnt=0;
2163 for (d= 0; d < conf->raid_disks; d++)
2164 if (conf->mirrors[d].rdev)
2165 cnt++;
2166 if (cnt > raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 return -EBUSY;
NeilBrown6ea9c072005-06-21 17:17:09 -07002168 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169
2170 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2171 if (!newpoolinfo)
2172 return -ENOMEM;
2173 newpoolinfo->mddev = mddev;
2174 newpoolinfo->raid_disks = raid_disks;
2175
2176 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2177 r1bio_pool_free, newpoolinfo);
2178 if (!newpool) {
2179 kfree(newpoolinfo);
2180 return -ENOMEM;
2181 }
NeilBrown9ffae0c2006-01-06 00:20:32 -08002182 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 if (!newmirrors) {
2184 kfree(newpoolinfo);
2185 mempool_destroy(newpool);
2186 return -ENOMEM;
2187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
NeilBrown17999be2006-01-06 00:20:12 -08002189 raise_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
2191 /* ok, everything is stopped */
2192 oldpool = conf->r1bio_pool;
2193 conf->r1bio_pool = newpool;
NeilBrown6ea9c072005-06-21 17:17:09 -07002194
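	/* pack the surviving devices into the lowest slots, moving each
	 * rdev's sysfs "rd%d" link to match its new raid_disk number */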
NeilBrowna88aa782007-08-22 14:01:53 -07002195 for (d = d2 = 0; d < conf->raid_disks; d++) {
2196 mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2197 if (rdev && rdev->raid_disk != d2) {
2198 char nm[20];
2199 sprintf(nm, "rd%d", rdev->raid_disk);
2200 sysfs_remove_link(&mddev->kobj, nm);
2201 rdev->raid_disk = d2;
2202 sprintf(nm, "rd%d", rdev->raid_disk);
2203 sysfs_remove_link(&mddev->kobj, nm);
2204 if (sysfs_create_link(&mddev->kobj,
2205 &rdev->kobj, nm))
2206 printk(KERN_WARNING
2207 "md/raid1: cannot register "
2208 "%s for %s\n",
2209 nm, mdname(mddev));
NeilBrown6ea9c072005-06-21 17:17:09 -07002210 }
NeilBrowna88aa782007-08-22 14:01:53 -07002211 if (rdev)
2212 newmirrors[d2++].rdev = rdev;
2213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 kfree(conf->mirrors);
2215 conf->mirrors = newmirrors;
2216 kfree(conf->poolinfo);
2217 conf->poolinfo = newpoolinfo;
2218
NeilBrownc04be0a2006-10-03 01:15:53 -07002219 spin_lock_irqsave(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 mddev->degraded += (raid_disks - conf->raid_disks);
NeilBrownc04be0a2006-10-03 01:15:53 -07002221 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 conf->raid_disks = mddev->raid_disks = raid_disks;
NeilBrown63c70c42006-03-27 01:18:13 -08002223 mddev->delta_disks = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
NeilBrown6ea9c072005-06-21 17:17:09 -07002225 conf->last_used = 0; /* just make sure it is in-range */
NeilBrown17999be2006-01-06 00:20:12 -08002226 lower_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
2228 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2229 md_wakeup_thread(mddev->thread);
2230
2231 mempool_destroy(oldpool);
2232 return 0;
2233}
2234
NeilBrown500af872005-09-09 16:23:58 -07002235static void raid1_quiesce(mddev_t *mddev, int state)
NeilBrown36fa3062005-09-09 16:23:45 -07002236{
2237 conf_t *conf = mddev_to_conf(mddev);
2238
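	/* state 1: suspend all normal and resync IO by raising the
	 * barrier; state 0: resume by lowering it again */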
2239 switch(state) {
NeilBrown9e6603d2005-09-09 16:23:48 -07002240 case 1:
NeilBrown17999be2006-01-06 00:20:12 -08002241 raise_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002242 break;
NeilBrown9e6603d2005-09-09 16:23:48 -07002243 case 0:
NeilBrown17999be2006-01-06 00:20:12 -08002244 lower_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002245 break;
2246 }
NeilBrown36fa3062005-09-09 16:23:45 -07002247}
2248
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
NeilBrown2604b702006-01-06 00:20:36 -08002250static struct mdk_personality raid1_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
2252 .name = "raid1",
NeilBrown2604b702006-01-06 00:20:36 -08002253 .level = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 .owner = THIS_MODULE,
2255 .make_request = make_request,
2256 .run = run,
2257 .stop = stop,
2258 .status = status,
2259 .error_handler = error,
2260 .hot_add_disk = raid1_add_disk,
2261 .hot_remove_disk= raid1_remove_disk,
2262 .spare_active = raid1_spare_active,
2263 .sync_request = sync_request,
2264 .resize = raid1_resize,
NeilBrown63c70c42006-03-27 01:18:13 -08002265 .check_reshape = raid1_reshape,
NeilBrown36fa3062005-09-09 16:23:45 -07002266 .quiesce = raid1_quiesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267};
2268
2269static int __init raid_init(void)
2270{
NeilBrown2604b702006-01-06 00:20:36 -08002271 return register_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272}
2273
2274static void raid_exit(void)
2275{
NeilBrown2604b702006-01-06 00:20:36 -08002276 unregister_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277}
2278
2279module_init(raid_init);
2280module_exit(raid_exit);
2281MODULE_LICENSE("GPL");
2282MODULE_ALIAS("md-personality-3"); /* RAID1 */
NeilBrownd9d166c2006-01-06 00:20:51 -08002283MODULE_ALIAS("md-raid1");
NeilBrown2604b702006-01-06 00:20:36 -08002284MODULE_ALIAS("md-level-1");