/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static inline void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

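/*
 * Return a resync/recovery buffer to the r1buf pool: drop the rdev
 * references taken while it was in flight and release the resync barrier
 * it held.
 */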
static inline void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

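/*
 * Queue an r1_bio on the retry list and wake the raid1d thread to handle
 * it (used for read errors, barrier retries and resync requests that need
 * follow-up writes).
 */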
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio, bio->bi_size,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

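/*
 * Completion handler for normal (non-resync) reads.  On failure the r1_bio
 * is queued for raid1d, which will retry the read on another mirror.
 */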
static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	if (bio->bi_size)
		return 1;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate || conf->working_disks <= 1) {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		if (uptodate)
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		raid_end_bio_io(r1_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	return 0;
}

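/*
 * Completion handler for normal writes.  Handles barrier retries,
 * write-behind accounting, and the final end_io on the master bio once
 * every mirror has completed.
 */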
static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	if (bio->bi_size)
		return 1;

	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, mbio->bi_size, 0);
				}
			}
		}
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			reschedule_retry(r1_bio);
			/* Don't dec_pending yet, we want to hold
			 * the reference over the retry
			 */
			return 0;
		}
		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			/* free extra copy of the data pages */
			int i = bio->bi_vcnt;
			while (i--)
				safe_put_page(bio->bi_io_vec[i].bv_page);
		}
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
				r1_bio->sectors,
				!test_bit(R1BIO_Degraded, &r1_bio->state),
				behind);
		md_write_end(r1_bio->mddev);
		raid_end_bio_io(r1_bio);
	}

	if (r1_bio->bios[mirror]==NULL)
		bio_put(bio);

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	return 0;
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

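/*
 * Kick the request queue of every working component device so that any
 * plugged I/O is issued.
 */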
static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

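/*
 * Pass a cache flush down to each working component device, stopping at
 * the first failure.
 */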
static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending match nr_queued+2
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
				      GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}

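/*
 * Main entry point for I/O to the array: reads are balanced across the
 * mirrors, writes are cloned to every working mirror and queued on
 * pending_bio_list for raid1d to submit once the bitmap has been updated.
 */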
static int make_request(request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	int do_barriers;

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	md_write_start(mddev, bio); /* wait on superblock update early */

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio->bi_rw & BIO_RW_BARRIER;
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged. This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}

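/* Report the array geometry and per-mirror state (used for /proc/mdstat) */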
static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->working_disks == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		conf->working_disks--;
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	}
	clear_bit(In_sync, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	mddev->sb_dirty = 1;
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
		"	Operation continuing on %d devices\n",
		bdevname(rdev->bdev,b), conf->working_disks);
}

static void print_conf(conf_t *conf)
{
	int i;
	mirror_info_t *tmp;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
		conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	mirror_info_t *tmp;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_bit(In_sync, &tmp->rdev->flags)) {
			conf->working_disks++;
			mddev->degraded--;
			set_bit(In_sync, &tmp->rdev->flags);
		}
	}

	print_conf(conf);
	return 0;
}


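/*
 * Add a spare device to the array: place it in the first unused mirror
 * slot and adjust the queue limits to match the new device.
 */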
static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror=0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

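/*
 * Hot-remove the device in the given slot; refused with -EBUSY if the
 * device is still in sync or has I/O pending.
 */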
static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


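/*
 * Completion handler for resync/recovery reads; the actual checking and
 * re-writing is deferred to raid1d via reschedule_retry().
 */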
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	if (bio->bi_size)
		return 1;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
	return 0;
}

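/*
 * Completion handler for resync/recovery writes; accounts the synced
 * sectors once all outstanding writes for this r1_bio have finished.
 */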
static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	if (bio->bi_size)
		return 1;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate)
		md_error(mddev, conf->mirrors[mirror].rdev);

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
	return 0;
}

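/*
 * Called by raid1d once the resync reads for an r1_bio have completed:
 * for a requested check/repair, compare the copies and fix up any read
 * errors, then schedule whatever writes are needed to bring the mirrors
 * back in sync.
 */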
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];


	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices. If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];
				for (j = vcnt; j-- ; )
					if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
						   page_address(sbio->bi_io_vec[j].bv_page),
						   PAGE_SIZE))
						break;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors. Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx ++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;
	int unplug=0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);

	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);

		if (conf->pending_bio_list.head) {
			bio = bio_list_get(&conf->pending_bio_list);
			blk_remove_plug(mddev->queue);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
			if (bitmap_unplug(mddev->bitmap) != 0)
				printk("%s: bitmap file write failed!\n", mdname(mddev));

			while (bio) { /* submit pending writes */
				struct bio *next = bio->bi_next;
				bio->bi_next = NULL;
				generic_make_request(bio);
				bio = next;
			}
			unplug = 1;

			continue;
		}

		if (list_empty(head))
			break;
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev_to_conf(mddev);
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			sync_request_write(mddev, r1_bio);
			unplug = 1;
		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			/* some requests in the r1bio were BIO_RW_BARRIER
			 * requests which failed with -ENOTSUPP. Hohumm..
			 * Better resubmit without the barrier.
			 * We know which devices to resubmit for, because
			 * all others have had their bios[] entry cleared.
			 */
			int i;
			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
			clear_bit(R1BIO_Barrier, &r1_bio->state);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i]) {
					struct bio_vec *bvec;
					int j;

					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
					/* copy pages from the failed bio, as
					 * this might be a write-behind device */
					__bio_for_each_segment(bvec, bio, j, 0)
						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
					bio_put(r1_bio->bios[i]);
					bio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
					bio->bi_end_io = raid1_end_write_request;
					bio->bi_rw = WRITE;
					bio->bi_private = r1_bio;
					r1_bio->bios[i] = bio;
					generic_make_request(bio);
				}
		} else {
			int disk;

			/* we got a read error. Maybe the drive is bad.  Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices.  When we find one, we re-write it and
			 * then re-read to check that the error is fixed.
			 * This is all done synchronously while the array is
			 * frozen
			 */
			sector_t sect = r1_bio->sector;
			int sectors = r1_bio->sectors;
			freeze_array(conf);
			if (mddev->ro == 0) while(sectors) {
				int s = sectors;
				int d = r1_bio->read_disk;
				int success = 0;

				if (s > (PAGE_SIZE>>9))
					s = PAGE_SIZE >> 9;

				do {
					rdev = conf->mirrors[d].rdev;
					if (rdev &&
					    test_bit(In_sync, &rdev->flags) &&
					    sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 conf->tmppage, READ))
						success = 1;
					else {
						d++;
						if (d == conf->raid_disks)
							d = 0;
					}
				} while (!success && d != r1_bio->read_disk);

				if (success) {
					/* write it back and re-read */
					int start = d;
					while (d != r1_bio->read_disk) {
						if (d==0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, WRITE) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
						}
					}
					d = start;
					while (d != r1_bio->read_disk) {
						if (d==0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, READ) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
						}
					}
				} else {
					/* Cannot read from anywhere -- bye bye array */
					md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
					break;
				}
				sectors -= s;
				sect += s;
			}

			unfreeze_array(conf);

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk=read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev,b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}
1535
1536
1537static int init_resync(conf_t *conf)
1538{
1539 int buffs;
1540
1541 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
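	/*
	 * With the RESYNC_WINDOW and RESYNC_BLOCK_SIZE values defined near
	 * the top of this file (a 2 MiB window of 64 KiB blocks), this
	 * works out to 32 preallocated buffers in the r1buf pool.
	 */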
1542 if (conf->r1buf_pool)
1543 BUG();
1544 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1545 conf->poolinfo);
1546 if (!conf->r1buf_pool)
1547 return -ENOMEM;
1548 conf->next_resync = 0;
1549 return 0;
1550}
1551
1552/*
1553 * perform a "sync" on one "block"
1554 *
1555 * We need to make sure that no normal I/O request - particularly write
1556 * requests - conflict with active sync requests.
1557	 * requests - conflicts with active sync requests.
1558 * This is achieved by tracking pending requests and a 'barrier' concept
1559 * that can be installed to exclude normal IO requests.
1560 */
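/*
 * A rough sketch of the intended interplay (the normal-I/O side of the
 * barrier lives elsewhere in this file):
 *
 *   resync side                     normal I/O side
 *   -----------                     ---------------
 *   raise_barrier(conf);            blocked while the barrier is up;
 *   ... submit resync bios ...      otherwise it proceeds and drops its
 *   lower_barrier(conf);            reference with allow_barrier() when
 *                                   the request completes.
 *
 * conf->nr_waiting, tested below, counts normal requests currently held
 * up behind the barrier.
 */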
1561
NeilBrown57afd892005-06-21 17:17:13 -07001562static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
1564 conf_t *conf = mddev_to_conf(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 r1bio_t *r1_bio;
1566 struct bio *bio;
1567 sector_t max_sector, nr_sectors;
NeilBrown3e198f72006-01-06 00:20:21 -08001568 int disk = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 int i;
NeilBrown3e198f72006-01-06 00:20:21 -08001570 int wonly = -1;
1571 int write_targets = 0, read_targets = 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001572 int sync_blocks;
NeilBrowne3b97032005-08-04 12:53:34 -07001573 int still_degraded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
1575 if (!conf->r1buf_pool)
NeilBrown191ea9b2005-06-21 17:17:23 -07001576 {
1577/*
1578 printk("sync start - bitmap %p\n", mddev->bitmap);
1579*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07001581 return 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
1584 max_sector = mddev->size << 1;
1585 if (sector_nr >= max_sector) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001586 /* If we aborted, we need to abort the
1587 * sync on the 'current' bitmap chunk (there will
1588		 * only be one in raid1 resync).
1589		 * We can find the current address in mddev->curr_resync
1590 */
NeilBrown6a806c52005-07-15 03:56:35 -07001591 if (mddev->curr_resync < max_sector) /* aborted */
1592 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
NeilBrown191ea9b2005-06-21 17:17:23 -07001593 &sync_blocks, 1);
NeilBrown6a806c52005-07-15 03:56:35 -07001594 else /* completed sync */
NeilBrown191ea9b2005-06-21 17:17:23 -07001595 conf->fullsync = 0;
NeilBrown6a806c52005-07-15 03:56:35 -07001596
1597 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 close_sync(conf);
1599 return 0;
1600 }
1601
NeilBrowne3b97032005-08-04 12:53:34 -07001602	/* before building a request, check if we can skip these blocks.
1603	 * This call to bitmap_start_sync doesn't actually record anything
1604 */
1605 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
NeilBrowne5de485f2005-11-08 21:39:38 -08001606 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001607 /* We can skip this block, and probably several more */
1608 *skipped = 1;
1609 return sync_blocks;
1610 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 /*
NeilBrown17999be2006-01-06 00:20:12 -08001612 * If there is non-resync activity waiting for a turn,
1613 * and resync is going fast enough,
1614	 * then let it through before starting on this new sync request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 */
NeilBrown17999be2006-01-06 00:20:12 -08001616 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 msleep_interruptible(1000);
NeilBrown17999be2006-01-06 00:20:12 -08001618
1619 raise_barrier(conf);
1620
1621 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
NeilBrown3e198f72006-01-06 00:20:21 -08001624 rcu_read_lock();
1625 /*
1626	 * If we get a correctable read error during resync or recovery,
1627 * we might want to read from a different device. So we
1628 * flag all drives that could conceivably be read from for READ,
1629 * and any others (which will be non-In_sync devices) for WRITE.
1630 * If a read fails, we try reading from something else for which READ
1631 * is OK.
1632 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 r1_bio->mddev = mddev;
1635 r1_bio->sector = sector_nr;
NeilBrown191ea9b2005-06-21 17:17:23 -07001636 r1_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 set_bit(R1BIO_IsSync, &r1_bio->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
1639 for (i=0; i < conf->raid_disks; i++) {
NeilBrown3e198f72006-01-06 00:20:21 -08001640 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 bio = r1_bio->bios[i];
1642
1643		/* re-initialise the bio fields by hand, as bio_init() would for a fresh bio */
1644 bio->bi_next = NULL;
1645 bio->bi_flags |= 1 << BIO_UPTODATE;
1646 bio->bi_rw = 0;
1647 bio->bi_vcnt = 0;
1648 bio->bi_idx = 0;
1649 bio->bi_phys_segments = 0;
1650 bio->bi_hw_segments = 0;
1651 bio->bi_size = 0;
1652 bio->bi_end_io = NULL;
1653 bio->bi_private = NULL;
1654
NeilBrown3e198f72006-01-06 00:20:21 -08001655 rdev = rcu_dereference(conf->mirrors[i].rdev);
1656 if (rdev == NULL ||
1657 test_bit(Faulty, &rdev->flags)) {
NeilBrowne3b97032005-08-04 12:53:34 -07001658 still_degraded = 1;
1659 continue;
NeilBrown3e198f72006-01-06 00:20:21 -08001660 } else if (!test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 bio->bi_rw = WRITE;
1662 bio->bi_end_io = end_sync_write;
1663 write_targets ++;
NeilBrown3e198f72006-01-06 00:20:21 -08001664 } else {
1665 /* may need to read from here */
1666 bio->bi_rw = READ;
1667 bio->bi_end_io = end_sync_read;
1668 if (test_bit(WriteMostly, &rdev->flags)) {
1669 if (wonly < 0)
1670 wonly = i;
1671 } else {
1672 if (disk < 0)
1673 disk = i;
1674 }
1675 read_targets++;
1676 }
1677 atomic_inc(&rdev->nr_pending);
1678 bio->bi_sector = sector_nr + rdev->data_offset;
1679 bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 bio->bi_private = r1_bio;
1681 }
NeilBrown3e198f72006-01-06 00:20:21 -08001682 rcu_read_unlock();
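	/*
	 * Prefer a non-WriteMostly In_sync device as the read source for
	 * this resync request; fall back to a WriteMostly one ("wonly")
	 * only if the loop above found nothing better.
	 */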
1683 if (disk < 0)
1684 disk = wonly;
1685 r1_bio->read_disk = disk;
NeilBrown191ea9b2005-06-21 17:17:23 -07001686
NeilBrown3e198f72006-01-06 00:20:21 -08001687 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1688 /* extra read targets are also write targets */
1689 write_targets += read_targets-1;
1690
1691 if (write_targets == 0 || read_targets == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 /* There is nowhere to write, so all non-sync
1693		 * drives must have been failed, so we are finished
1694 */
NeilBrown57afd892005-06-21 17:17:13 -07001695 sector_t rv = max_sector - sector_nr;
1696 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 put_buf(r1_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 return rv;
1699 }
1700
1701 nr_sectors = 0;
NeilBrown289e99e2005-06-21 17:17:24 -07001702 sync_blocks = 0;
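	/*
	 * Fill the r1_bio a page at a time.  Each pass re-checks the bitmap
	 * (once the current chunk is used up) to see whether the next blocks
	 * need syncing at all, clamps the length to what the bitmap and the
	 * end of the device allow, and adds the page to every bio that has
	 * an end_io handler.  If any bio refuses the page, the page is
	 * backed out of the others and we submit what we have (bio_full).
	 */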
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 do {
1704 struct page *page;
1705 int len = PAGE_SIZE;
1706 if (sector_nr + (len>>9) > max_sector)
1707 len = (max_sector - sector_nr) << 9;
1708 if (len == 0)
1709 break;
NeilBrown6a806c52005-07-15 03:56:35 -07001710 if (sync_blocks == 0) {
1711 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
NeilBrowne5de485f2005-11-08 21:39:38 -08001712 &sync_blocks, still_degraded) &&
1713 !conf->fullsync &&
1714 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
NeilBrown6a806c52005-07-15 03:56:35 -07001715 break;
1716 if (sync_blocks < (PAGE_SIZE>>9))
1717 BUG();
1718 if (len > (sync_blocks<<9))
1719 len = sync_blocks<<9;
NeilBrownab7a30c2005-06-21 17:17:23 -07001720 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001721
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 for (i=0 ; i < conf->raid_disks; i++) {
1723 bio = r1_bio->bios[i];
1724 if (bio->bi_end_io) {
NeilBrownd11c1712006-01-06 00:20:26 -08001725 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 if (bio_add_page(bio, page, len, 0) == 0) {
1727 /* stop here */
NeilBrownd11c1712006-01-06 00:20:26 -08001728 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 while (i > 0) {
1730 i--;
1731 bio = r1_bio->bios[i];
NeilBrown6a806c52005-07-15 03:56:35 -07001732 if (bio->bi_end_io==NULL)
1733 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 /* remove last page from this bio */
1735 bio->bi_vcnt--;
1736 bio->bi_size -= len;
1737 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
1738 }
1739 goto bio_full;
1740 }
1741 }
1742 }
1743 nr_sectors += len>>9;
1744 sector_nr += len>>9;
NeilBrown191ea9b2005-06-21 17:17:23 -07001745 sync_blocks -= (len>>9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1747 bio_full:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 r1_bio->sectors = nr_sectors;
1749
NeilBrownd11c1712006-01-06 00:20:26 -08001750 /* For a user-requested sync, we read all readable devices and do a
1751 * compare
1752 */
1753 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1754 atomic_set(&r1_bio->remaining, read_targets);
1755 for (i=0; i<conf->raid_disks; i++) {
1756 bio = r1_bio->bios[i];
1757 if (bio->bi_end_io == end_sync_read) {
1758 md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
1759 generic_make_request(bio);
1760 }
1761 }
1762 } else {
1763 atomic_set(&r1_bio->remaining, 1);
1764 bio = r1_bio->bios[r1_bio->read_disk];
1765 md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
1766 nr_sectors);
1767 generic_make_request(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
NeilBrownd11c1712006-01-06 00:20:26 -08001769 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 return nr_sectors;
1772}
1773
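/*
 * Assemble a raid1 array: allocate the per-array conf structure, the
 * mirror table, a scratch page and the r1bio mempool, wire each member
 * rdev into conf->mirrors, and start the raid1d worker thread.
 */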
1774static int run(mddev_t *mddev)
1775{
1776 conf_t *conf;
1777 int i, j, disk_idx;
1778 mirror_info_t *disk;
1779 mdk_rdev_t *rdev;
1780 struct list_head *tmp;
1781
1782 if (mddev->level != 1) {
1783 printk("raid1: %s: raid level not set to mirroring (%d)\n",
1784 mdname(mddev), mddev->level);
1785 goto out;
1786 }
1787 /*
1788 * copy the already verified devices into our private RAID1
1789 * bookkeeping area. [whatever we allocate in run(),
1790	 * bookkeeping area. [whatever we allocate in run()
1791 */
NeilBrown9ffae0c2006-01-06 00:20:32 -08001792 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 mddev->private = conf;
1794 if (!conf)
1795 goto out_no_mem;
1796
NeilBrown9ffae0c2006-01-06 00:20:32 -08001797 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 GFP_KERNEL);
1799 if (!conf->mirrors)
1800 goto out_no_mem;
1801
NeilBrownddaf22a2006-01-06 00:20:19 -08001802 conf->tmppage = alloc_page(GFP_KERNEL);
1803 if (!conf->tmppage)
1804 goto out_no_mem;
1805
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1807 if (!conf->poolinfo)
1808 goto out_no_mem;
1809 conf->poolinfo->mddev = mddev;
1810 conf->poolinfo->raid_disks = mddev->raid_disks;
1811 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1812 r1bio_pool_free,
1813 conf->poolinfo);
1814 if (!conf->r1bio_pool)
1815 goto out_no_mem;
1816
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 ITERATE_RDEV(mddev, rdev, tmp) {
1818 disk_idx = rdev->raid_disk;
1819 if (disk_idx >= mddev->raid_disks
1820 || disk_idx < 0)
1821 continue;
1822 disk = conf->mirrors + disk_idx;
1823
1824 disk->rdev = rdev;
1825
1826 blk_queue_stack_limits(mddev->queue,
1827 rdev->bdev->bd_disk->queue);
1828 /* as we don't honour merge_bvec_fn, we must never risk
1829		 * violating it, so limit ->max_sectors to one PAGE, as
1830 * a one page request is never in violation.
1831 */
1832 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1833 mddev->queue->max_sectors > (PAGE_SIZE>>9))
1834 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
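		/*
		 * On 4 KiB-page systems, for example, PAGE_SIZE>>9 is 8, so
		 * requests are capped at a single page and can never span a
		 * boundary that merge_bvec_fn would have rejected.
		 */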
1835
1836 disk->head_position = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08001837 if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 conf->working_disks++;
1839 }
1840 conf->raid_disks = mddev->raid_disks;
1841 conf->mddev = mddev;
1842 spin_lock_init(&conf->device_lock);
1843 INIT_LIST_HEAD(&conf->retry_list);
1844 if (conf->working_disks == 1)
1845 mddev->recovery_cp = MaxSector;
1846
1847 spin_lock_init(&conf->resync_lock);
NeilBrown17999be2006-01-06 00:20:12 -08001848 init_waitqueue_head(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
NeilBrown191ea9b2005-06-21 17:17:23 -07001850 bio_list_init(&conf->pending_bio_list);
1851 bio_list_init(&conf->flushing_bio_list);
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 if (!conf->working_disks) {
1854 printk(KERN_ERR "raid1: no operational mirrors for %s\n",
1855 mdname(mddev));
1856 goto out_free_conf;
1857 }
1858
1859 mddev->degraded = 0;
1860 for (i = 0; i < conf->raid_disks; i++) {
1861
1862 disk = conf->mirrors + i;
1863
1864 if (!disk->rdev) {
1865 disk->head_position = 0;
1866 mddev->degraded++;
1867 }
1868 }
1869
1870 /*
1871 * find the first working one and use it as a starting point
1872 * to read balancing.
1873	 * for read balancing.
1874 for (j = 0; j < conf->raid_disks &&
1875 (!conf->mirrors[j].rdev ||
NeilBrownb2d444d2005-11-08 21:39:31 -08001876 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 /* nothing */;
1878 conf->last_used = j;
1879
1880
NeilBrown191ea9b2005-06-21 17:17:23 -07001881 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
1882 if (!mddev->thread) {
1883 printk(KERN_ERR
1884 "raid1: couldn't allocate thread for %s\n",
1885 mdname(mddev));
1886 goto out_free_conf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001888
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 printk(KERN_INFO
1890 "raid1: raid set %s active with %d out of %d mirrors\n",
1891 mdname(mddev), mddev->raid_disks - mddev->degraded,
1892 mddev->raid_disks);
1893 /*
1894 * Ok, everything is just fine now
1895 */
1896 mddev->array_size = mddev->size;
1897
NeilBrown7a5febe2005-05-16 21:53:16 -07001898 mddev->queue->unplug_fn = raid1_unplug;
1899 mddev->queue->issue_flush_fn = raid1_issue_flush;
1900
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 return 0;
1902
1903out_no_mem:
1904 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
1905 mdname(mddev));
1906
1907out_free_conf:
1908 if (conf) {
1909 if (conf->r1bio_pool)
1910 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001911 kfree(conf->mirrors);
NeilBrown1345b1d2006-01-06 00:20:40 -08001912 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001913 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 kfree(conf);
1915 mddev->private = NULL;
1916 }
1917out:
1918 return -EIO;
1919}
1920
1921static int stop(mddev_t *mddev)
1922{
1923 conf_t *conf = mddev_to_conf(mddev);
NeilBrown4b6d2872005-09-09 16:23:47 -07001924 struct bitmap *bitmap = mddev->bitmap;
1925 int behind_wait = 0;
1926
1927 /* wait for behind writes to complete */
1928 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1929 behind_wait++;
1930 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
1931 set_current_state(TASK_UNINTERRUPTIBLE);
1932 schedule_timeout(HZ); /* wait a second */
1933 /* need to kick something here to make sure I/O goes? */
1934 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936 md_unregister_thread(mddev->thread);
1937 mddev->thread = NULL;
1938 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1939 if (conf->r1bio_pool)
1940 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001941 kfree(conf->mirrors);
1942 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 kfree(conf);
1944 mddev->private = NULL;
1945 return 0;
1946}
1947
1948static int raid1_resize(mddev_t *mddev, sector_t sectors)
1949{
1950 /* no resync is happening, and there is enough space
1951 * on all devices, so we can resize.
1952 * We need to make sure resync covers any new space.
1953	 * If the array is shrinking, we should possibly wait until
1954	 * any I/O in the removed space completes, but it hardly seems
1955 * worth it.
1956 */
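	/*
	 * mddev->array_size and mddev->size are kept in 1K blocks while
	 * 'sectors' is in 512-byte sectors, hence the >>1 when storing the
	 * new size and the <<1 when converting back to sectors for
	 * set_capacity() and recovery_cp.
	 */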
1957 mddev->array_size = sectors>>1;
1958 set_capacity(mddev->gendisk, mddev->array_size << 1);
1959 mddev->changed = 1;
1960 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
1961 mddev->recovery_cp = mddev->size << 1;
1962 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1963 }
1964 mddev->size = mddev->array_size;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07001965 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 return 0;
1967}
1968
1969static int raid1_reshape(mddev_t *mddev, int raid_disks)
1970{
1971 /* We need to:
1972 * 1/ resize the r1bio_pool
1973 * 2/ resize conf->mirrors
1974 *
1975 * We allocate a new r1bio_pool if we can.
1976 * Then raise a device barrier and wait until all IO stops.
1977 * Then resize conf->mirrors and swap in the new r1bio pool.
NeilBrown6ea9c072005-06-21 17:17:09 -07001978 *
1979 * At the same time, we "pack" the devices so that all the missing
1980 * devices have the higher raid_disk numbers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 */
1982 mempool_t *newpool, *oldpool;
1983 struct pool_info *newpoolinfo;
1984 mirror_info_t *newmirrors;
1985 conf_t *conf = mddev_to_conf(mddev);
NeilBrown6ea9c072005-06-21 17:17:09 -07001986 int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
NeilBrown6ea9c072005-06-21 17:17:09 -07001988 int d, d2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
NeilBrown6ea9c072005-06-21 17:17:09 -07001990 if (raid_disks < conf->raid_disks) {
1991 cnt=0;
1992 for (d= 0; d < conf->raid_disks; d++)
1993 if (conf->mirrors[d].rdev)
1994 cnt++;
1995 if (cnt > raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 return -EBUSY;
NeilBrown6ea9c072005-06-21 17:17:09 -07001997 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2000 if (!newpoolinfo)
2001 return -ENOMEM;
2002 newpoolinfo->mddev = mddev;
2003 newpoolinfo->raid_disks = raid_disks;
2004
2005 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2006 r1bio_pool_free, newpoolinfo);
2007 if (!newpool) {
2008 kfree(newpoolinfo);
2009 return -ENOMEM;
2010 }
NeilBrown9ffae0c2006-01-06 00:20:32 -08002011 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 if (!newmirrors) {
2013 kfree(newpoolinfo);
2014 mempool_destroy(newpool);
2015 return -ENOMEM;
2016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
NeilBrown17999be2006-01-06 00:20:12 -08002018 raise_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020 /* ok, everything is stopped */
2021 oldpool = conf->r1bio_pool;
2022 conf->r1bio_pool = newpool;
NeilBrown6ea9c072005-06-21 17:17:09 -07002023
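	/*
	 * Pack the surviving devices into consecutive raid_disk slots (d2)
	 * so that any missing devices end up with the highest numbers, as
	 * promised in the comment at the top of this function.
	 */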
2024 for (d=d2=0; d < conf->raid_disks; d++)
2025 if (conf->mirrors[d].rdev) {
2026 conf->mirrors[d].rdev->raid_disk = d2;
2027 newmirrors[d2++].rdev = conf->mirrors[d].rdev;
2028 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 kfree(conf->mirrors);
2030 conf->mirrors = newmirrors;
2031 kfree(conf->poolinfo);
2032 conf->poolinfo = newpoolinfo;
2033
2034 mddev->degraded += (raid_disks - conf->raid_disks);
2035 conf->raid_disks = mddev->raid_disks = raid_disks;
2036
NeilBrown6ea9c072005-06-21 17:17:09 -07002037 conf->last_used = 0; /* just make sure it is in-range */
NeilBrown17999be2006-01-06 00:20:12 -08002038 lower_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039
2040 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2041 md_wakeup_thread(mddev->thread);
2042
2043 mempool_destroy(oldpool);
2044 return 0;
2045}
2046
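/*
 * md core calls ->quiesce with state 1 to suspend normal array I/O
 * (done here by raising the resync barrier) and with state 0 to let
 * it flow again (lowering the barrier).
 */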
NeilBrown500af872005-09-09 16:23:58 -07002047static void raid1_quiesce(mddev_t *mddev, int state)
NeilBrown36fa3062005-09-09 16:23:45 -07002048{
2049 conf_t *conf = mddev_to_conf(mddev);
2050
2051 switch(state) {
NeilBrown9e6603d2005-09-09 16:23:48 -07002052 case 1:
NeilBrown17999be2006-01-06 00:20:12 -08002053 raise_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002054 break;
NeilBrown9e6603d2005-09-09 16:23:48 -07002055 case 0:
NeilBrown17999be2006-01-06 00:20:12 -08002056 lower_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002057 break;
2058 }
NeilBrown36fa3062005-09-09 16:23:45 -07002059}
2060
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
NeilBrown2604b702006-01-06 00:20:36 -08002062static struct mdk_personality raid1_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063{
2064 .name = "raid1",
NeilBrown2604b702006-01-06 00:20:36 -08002065 .level = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 .owner = THIS_MODULE,
2067 .make_request = make_request,
2068 .run = run,
2069 .stop = stop,
2070 .status = status,
2071 .error_handler = error,
2072 .hot_add_disk = raid1_add_disk,
2073 .hot_remove_disk= raid1_remove_disk,
2074 .spare_active = raid1_spare_active,
2075 .sync_request = sync_request,
2076 .resize = raid1_resize,
2077 .reshape = raid1_reshape,
NeilBrown36fa3062005-09-09 16:23:45 -07002078 .quiesce = raid1_quiesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079};
2080
2081static int __init raid_init(void)
2082{
NeilBrown2604b702006-01-06 00:20:36 -08002083 return register_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084}
2085
2086static void raid_exit(void)
2087{
NeilBrown2604b702006-01-06 00:20:36 -08002088 unregister_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089}
2090
2091module_init(raid_init);
2092module_exit(raid_exit);
2093MODULE_LICENSE("GPL");
2094MODULE_ALIAS("md-personality-3"); /* RAID1 */
NeilBrownd9d166c2006-01-06 00:20:51 -08002095MODULE_ALIAS("md-raid1");
NeilBrown2604b702006-01-06 00:20:36 -08002096MODULE_ALIAS("md-level-1");