/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
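/*
 * Resync I/O is done in units of RESYNC_BLOCK_SIZE, each backed by
 * RESYNC_PAGES pages; RESYNC_WINDOW / RESYNC_BLOCK_SIZE such buffers
 * are preallocated in r1buf_pool (see init_resync() below).
 */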

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio, bio->bi_size,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	if (bio->bi_size)
		return 1;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate || conf->working_disks <= 1) {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		if (uptodate)
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		raid_end_bio_io(r1_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	return 0;
}

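/*
 * Completion handler for normal (non-resync) writes.  A barrier write
 * that fails with -ENOTSUPP is requeued for retry without the barrier;
 * write-behind completions are accounted separately so the master bio
 * can be ended as soon as all non-write-mostly devices have the data.
 */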
static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;

	if (bio->bi_size)
		return 1;

	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, mbio->bi_size, 0);
				}
			}
		}
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			reschedule_retry(r1_bio);
			/* Don't dec_pending yet, we want to hold
			 * the reference over the retry
			 */
			goto out;
		}
		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			/* free extra copy of the data pages */
			int i = bio->bi_vcnt;
			while (i--)
				safe_put_page(bio->bi_io_vec[i].bv_page);
		}
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
				r1_bio->sectors,
				!test_bit(R1BIO_Degraded, &r1_bio->state),
				behind);
		md_write_end(r1_bio->mddev);
		raid_end_bio_io(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 out:
	if (to_put)
		bio_put(to_put);

	return 0;
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
	     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending match nr_queued+2
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
					GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
			kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}

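/*
 * make_request() is the entry point for normal I/O: reads are balanced
 * to a single mirror via read_balance(), while writes are cloned to
 * every working mirror (with optional write-behind copies) and queued
 * on pending_bio_list for raid1d to submit once the bitmap is updated.
 */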
static int make_request(request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	int do_barriers;

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	md_write_start(mddev, bio); /* wait on superblock update early */

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged. This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
						conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			      conf->mirrors[i].rdev &&
			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->working_disks == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		conf->working_disks--;
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	}
	clear_bit(In_sync, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	mddev->sb_dirty = 1;
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
		"	Operation continuing on %d devices\n",
		bdevname(rdev->bdev,b), conf->working_disks);
}

static void print_conf(conf_t *conf)
{
	int i;
	mirror_info_t *tmp;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
		conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	mirror_info_t *tmp;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_bit(In_sync, &tmp->rdev->flags)) {
			conf->working_disks++;
			mddev->degraded--;
			set_bit(In_sync, &tmp->rdev->flags);
		}
	}

	print_conf(conf);
	return 0;
}


static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror=0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	if (bio->bi_size)
		return 1;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
	return 0;
}

static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	if (bio->bi_size)
		return 1;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate)
		md_error(mddev, conf->mirrors[mirror].rdev);

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
	return 0;
}

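/*
 * sync_request_write() runs in raid1d once the resync reads for an
 * r1_bio have completed: for a user-requested check/repair it compares
 * the copies (counting mismatches) and rewrites differing blocks, on a
 * failed read it retries synchronously from the other mirrors, and
 * finally it schedules the writes needed to bring the mirrors in sync.
 */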
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];


	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices. If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];
				for (j = vcnt; j-- ; )
					if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
						   page_address(sbio->bi_io_vec[j].bv_page),
						   PAGE_SIZE))
						break;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors. Only
		 * read into the pages we already have so that we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx ++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;
	int unplug=0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);

	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);

		if (conf->pending_bio_list.head) {
			bio = bio_list_get(&conf->pending_bio_list);
			blk_remove_plug(mddev->queue);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
			if (bitmap_unplug(mddev->bitmap) != 0)
				printk("%s: bitmap file write failed!\n", mdname(mddev));

			while (bio) { /* submit pending writes */
				struct bio *next = bio->bi_next;
				bio->bi_next = NULL;
				generic_make_request(bio);
				bio = next;
			}
			unplug = 1;

			continue;
		}

		if (list_empty(head))
			break;
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev_to_conf(mddev);
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			sync_request_write(mddev, r1_bio);
			unplug = 1;
		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			/* some requests in the r1bio were BIO_RW_BARRIER
			 * requests which failed with -ENOTSUPP. Hohumm..
			 * Better resubmit without the barrier.
			 * We know which devices to resubmit for, because
			 * all others have had their bios[] entry cleared.
			 */
			int i;
			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
			clear_bit(R1BIO_Barrier, &r1_bio->state);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i]) {
					struct bio_vec *bvec;
					int j;

					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
					/* copy pages from the failed bio, as
					 * this might be a write-behind device */
					__bio_for_each_segment(bvec, bio, j, 0)
						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
					bio_put(r1_bio->bios[i]);
					bio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
					bio->bi_end_io = raid1_end_write_request;
					bio->bi_rw = WRITE;
					bio->bi_private = r1_bio;
					r1_bio->bios[i] = bio;
					generic_make_request(bio);
				}
		} else {
			int disk;

			/* we got a read error. Maybe the drive is bad. Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices. When we find one, we re-write
			 * and check if that fixes the read error.
			 * This is all done synchronously while the array is
			 * frozen
			 */
			sector_t sect = r1_bio->sector;
			int sectors = r1_bio->sectors;
			freeze_array(conf);
			if (mddev->ro == 0) while(sectors) {
				int s = sectors;
				int d = r1_bio->read_disk;
				int success = 0;

				if (s > (PAGE_SIZE>>9))
					s = PAGE_SIZE >> 9;

				do {
					rdev = conf->mirrors[d].rdev;
					if (rdev &&
					    test_bit(In_sync, &rdev->flags) &&
					    sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 conf->tmppage, READ))
						success = 1;
					else {
						d++;
						if (d == conf->raid_disks)
							d = 0;
					}
				} while (!success && d != r1_bio->read_disk);

				if (success) {
					/* write it back and re-read */
					int start = d;
					while (d != r1_bio->read_disk) {
						if (d==0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						atomic_add(s, &rdev->corrected_errors);
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, WRITE) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
						}
					}
					d = start;
					while (d != r1_bio->read_disk) {
						if (d==0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, READ) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
						}
					}
				} else {
					/* Cannot read from anywhere -- bye bye array */
					md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
					break;
				}
				sectors -= s;
				sect += s;
			}

			unfreeze_array(conf);

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk=read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev,b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}


1542static int init_resync(conf_t *conf)
1543{
1544 int buffs;
1545
1546 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
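	/* i.e. enough pre-allocated resync buffers to keep one full resync
	 * window's worth of RESYNC_BLOCK_SIZE chunks in flight at a time.
	 */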
1547 if (conf->r1buf_pool)
1548 BUG();
1549 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1550 conf->poolinfo);
1551 if (!conf->r1buf_pool)
1552 return -ENOMEM;
1553 conf->next_resync = 0;
1554 return 0;
1555}
1556
1557/*
1558 * perform a "sync" on one "block"
1559 *
1560 * We need to make sure that no normal I/O request - particularly write
1561 * requests - conflict with active sync requests.
1562 *
1563 * This is achieved by tracking pending requests and a 'barrier' concept
1564 * that can be installed to exclude normal IO requests.
1565 */
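/* An illustrative sketch of the interaction (see the barrier helpers
 * earlier in this file): normal I/O calls wait_barrier() before submitting
 * and allow_barrier() once its r1_bio completes, while resync brackets each
 * block with raise_barrier()/lower_barrier().  A raised barrier makes
 * wait_barrier() sleep, so resync of a block never runs concurrently with
 * newly-issued normal requests.
 */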
1566
NeilBrown57afd892005-06-21 17:17:13 -07001567static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
1569 conf_t *conf = mddev_to_conf(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 r1bio_t *r1_bio;
1571 struct bio *bio;
1572 sector_t max_sector, nr_sectors;
NeilBrown3e198f72006-01-06 00:20:21 -08001573 int disk = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 int i;
NeilBrown3e198f72006-01-06 00:20:21 -08001575 int wonly = -1;
1576 int write_targets = 0, read_targets = 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001577 int sync_blocks;
NeilBrowne3b97032005-08-04 12:53:34 -07001578 int still_degraded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
1580 if (!conf->r1buf_pool)
NeilBrown191ea9b2005-06-21 17:17:23 -07001581 {
1582/*
1583 printk("sync start - bitmap %p\n", mddev->bitmap);
1584*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07001586 return 0;
NeilBrown191ea9b2005-06-21 17:17:23 -07001587 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
1589 max_sector = mddev->size << 1;
1590 if (sector_nr >= max_sector) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001591		/* If we aborted, we need to abort the
 1592		 * sync on the 'current' bitmap chunk (there will
 1593		 * only be one in raid1 resync).
 1594		 * We can find the current address in mddev->curr_resync.
 1595		 */
NeilBrown6a806c52005-07-15 03:56:35 -07001596 if (mddev->curr_resync < max_sector) /* aborted */
1597 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
NeilBrown191ea9b2005-06-21 17:17:23 -07001598 &sync_blocks, 1);
NeilBrown6a806c52005-07-15 03:56:35 -07001599 else /* completed sync */
NeilBrown191ea9b2005-06-21 17:17:23 -07001600 conf->fullsync = 0;
NeilBrown6a806c52005-07-15 03:56:35 -07001601
1602 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 close_sync(conf);
1604 return 0;
1605 }
1606
NeilBrowne3b97032005-08-04 12:53:34 -07001607	/* Before building a request, check if we can skip these blocks.
 1608	 * This call to bitmap_start_sync doesn't actually record anything.
 1609	 */
1610 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
NeilBrowne5de485f2005-11-08 21:39:38 -08001611 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown191ea9b2005-06-21 17:17:23 -07001612 /* We can skip this block, and probably several more */
1613 *skipped = 1;
1614 return sync_blocks;
1615 }
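	/* The skip above is what makes bitmap-based resync cheap: after an
	 * unclean shutdown with a write-intent bitmap, only chunks marked
	 * dirty in the bitmap are resynced; for clean chunks
	 * bitmap_start_sync() reports nothing to do and the whole chunk is
	 * returned as skipped here.
	 */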
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 /*
NeilBrown17999be2006-01-06 00:20:12 -08001617 * If there is non-resync activity waiting for a turn,
1618 * and resync is going fast enough,
 1619	 * then let it through before starting on this new sync request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 */
NeilBrown17999be2006-01-06 00:20:12 -08001621 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 msleep_interruptible(1000);
NeilBrown17999be2006-01-06 00:20:12 -08001623
1624 raise_barrier(conf);
1625
1626 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
NeilBrown3e198f72006-01-06 00:20:21 -08001629 rcu_read_lock();
1630 /*
 1631	 * If we get a correctable read error during resync or recovery,
1632 * we might want to read from a different device. So we
1633 * flag all drives that could conceivably be read from for READ,
1634 * and any others (which will be non-In_sync devices) for WRITE.
1635 * If a read fails, we try reading from something else for which READ
1636 * is OK.
1637 */
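	/* For example (illustrative only), in a 3-disk set where disk 2 is
	 * being rebuilt: disks 0 and 1 get READ/end_sync_read bios, disk 2
	 * gets a WRITE/end_sync_write bio, and r1_bio->read_disk is pointed
	 * at a non-WriteMostly reader if one exists.
	 */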
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 r1_bio->mddev = mddev;
1640 r1_bio->sector = sector_nr;
NeilBrown191ea9b2005-06-21 17:17:23 -07001641 r1_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 set_bit(R1BIO_IsSync, &r1_bio->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
1644 for (i=0; i < conf->raid_disks; i++) {
NeilBrown3e198f72006-01-06 00:20:21 -08001645 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 bio = r1_bio->bios[i];
1647
1648 /* take from bio_init */
1649 bio->bi_next = NULL;
1650 bio->bi_flags |= 1 << BIO_UPTODATE;
1651 bio->bi_rw = 0;
1652 bio->bi_vcnt = 0;
1653 bio->bi_idx = 0;
1654 bio->bi_phys_segments = 0;
1655 bio->bi_hw_segments = 0;
1656 bio->bi_size = 0;
1657 bio->bi_end_io = NULL;
1658 bio->bi_private = NULL;
1659
NeilBrown3e198f72006-01-06 00:20:21 -08001660 rdev = rcu_dereference(conf->mirrors[i].rdev);
1661 if (rdev == NULL ||
1662 test_bit(Faulty, &rdev->flags)) {
NeilBrowne3b97032005-08-04 12:53:34 -07001663 still_degraded = 1;
1664 continue;
NeilBrown3e198f72006-01-06 00:20:21 -08001665 } else if (!test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 bio->bi_rw = WRITE;
1667 bio->bi_end_io = end_sync_write;
1668 write_targets ++;
NeilBrown3e198f72006-01-06 00:20:21 -08001669 } else {
1670 /* may need to read from here */
1671 bio->bi_rw = READ;
1672 bio->bi_end_io = end_sync_read;
1673 if (test_bit(WriteMostly, &rdev->flags)) {
1674 if (wonly < 0)
1675 wonly = i;
1676 } else {
1677 if (disk < 0)
1678 disk = i;
1679 }
1680 read_targets++;
1681 }
1682 atomic_inc(&rdev->nr_pending);
1683 bio->bi_sector = sector_nr + rdev->data_offset;
1684 bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 bio->bi_private = r1_bio;
1686 }
NeilBrown3e198f72006-01-06 00:20:21 -08001687 rcu_read_unlock();
1688 if (disk < 0)
1689 disk = wonly;
1690 r1_bio->read_disk = disk;
NeilBrown191ea9b2005-06-21 17:17:23 -07001691
NeilBrown3e198f72006-01-06 00:20:21 -08001692 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1693 /* extra read targets are also write targets */
1694 write_targets += read_targets-1;
1695
1696 if (write_targets == 0 || read_targets == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697		/* There is nowhere to write (or nothing to read from),
 1698		 * so all non-sync drives must have failed - we are finished.
 1699		 */
NeilBrown57afd892005-06-21 17:17:13 -07001700 sector_t rv = max_sector - sector_nr;
1701 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 put_buf(r1_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 return rv;
1704 }
1705
1706 nr_sectors = 0;
NeilBrown289e99e2005-06-21 17:17:24 -07001707 sync_blocks = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 do {
1709 struct page *page;
1710 int len = PAGE_SIZE;
1711 if (sector_nr + (len>>9) > max_sector)
1712 len = (max_sector - sector_nr) << 9;
1713 if (len == 0)
1714 break;
NeilBrown6a806c52005-07-15 03:56:35 -07001715 if (sync_blocks == 0) {
1716 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
NeilBrowne5de485f2005-11-08 21:39:38 -08001717 &sync_blocks, still_degraded) &&
1718 !conf->fullsync &&
1719 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
NeilBrown6a806c52005-07-15 03:56:35 -07001720 break;
1721 if (sync_blocks < (PAGE_SIZE>>9))
1722 BUG();
1723 if (len > (sync_blocks<<9))
1724 len = sync_blocks<<9;
NeilBrownab7a30c2005-06-21 17:17:23 -07001725 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 for (i=0 ; i < conf->raid_disks; i++) {
1728 bio = r1_bio->bios[i];
1729 if (bio->bi_end_io) {
NeilBrownd11c1712006-01-06 00:20:26 -08001730 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 if (bio_add_page(bio, page, len, 0) == 0) {
1732 /* stop here */
NeilBrownd11c1712006-01-06 00:20:26 -08001733 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 while (i > 0) {
1735 i--;
1736 bio = r1_bio->bios[i];
NeilBrown6a806c52005-07-15 03:56:35 -07001737 if (bio->bi_end_io==NULL)
1738 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 /* remove last page from this bio */
1740 bio->bi_vcnt--;
1741 bio->bi_size -= len;
1742 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
1743 }
1744 goto bio_full;
1745 }
1746 }
1747 }
1748 nr_sectors += len>>9;
1749 sector_nr += len>>9;
NeilBrown191ea9b2005-06-21 17:17:23 -07001750 sync_blocks -= (len>>9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1752 bio_full:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 r1_bio->sectors = nr_sectors;
1754
NeilBrownd11c1712006-01-06 00:20:26 -08001755	/* For a user-requested sync, we read all readable devices
 1756	 * and compare the copies.
 1757	 */
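	/* A user-requested sync here means a "check" or "repair" started
	 * through md's sync_action sysfs attribute, which sets
	 * MD_RECOVERY_REQUESTED; the actual comparison of the copies happens
	 * later, once the reads complete and raid1d processes the r1_bio.
	 */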
1758 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1759 atomic_set(&r1_bio->remaining, read_targets);
1760 for (i=0; i<conf->raid_disks; i++) {
1761 bio = r1_bio->bios[i];
1762 if (bio->bi_end_io == end_sync_read) {
1763 md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
1764 generic_make_request(bio);
1765 }
1766 }
1767 } else {
1768 atomic_set(&r1_bio->remaining, 1);
1769 bio = r1_bio->bios[r1_bio->read_disk];
1770 md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
1771 nr_sectors);
1772 generic_make_request(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773
NeilBrownd11c1712006-01-06 00:20:26 -08001774 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
1776 return nr_sectors;
1777}
1778
1779static int run(mddev_t *mddev)
1780{
1781 conf_t *conf;
1782 int i, j, disk_idx;
1783 mirror_info_t *disk;
1784 mdk_rdev_t *rdev;
1785 struct list_head *tmp;
1786
1787 if (mddev->level != 1) {
1788 printk("raid1: %s: raid level not set to mirroring (%d)\n",
1789 mdname(mddev), mddev->level);
1790 goto out;
1791 }
1792 /*
1793 * copy the already verified devices into our private RAID1
1794 * bookkeeping area. [whatever we allocate in run(),
1795 * should be freed in stop()]
1796 */
NeilBrown9ffae0c2006-01-06 00:20:32 -08001797 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 mddev->private = conf;
1799 if (!conf)
1800 goto out_no_mem;
1801
NeilBrown9ffae0c2006-01-06 00:20:32 -08001802 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 GFP_KERNEL);
1804 if (!conf->mirrors)
1805 goto out_no_mem;
1806
NeilBrownddaf22a2006-01-06 00:20:19 -08001807 conf->tmppage = alloc_page(GFP_KERNEL);
1808 if (!conf->tmppage)
1809 goto out_no_mem;
1810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1812 if (!conf->poolinfo)
1813 goto out_no_mem;
1814 conf->poolinfo->mddev = mddev;
1815 conf->poolinfo->raid_disks = mddev->raid_disks;
1816 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1817 r1bio_pool_free,
1818 conf->poolinfo);
1819 if (!conf->r1bio_pool)
1820 goto out_no_mem;
1821
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 ITERATE_RDEV(mddev, rdev, tmp) {
1823 disk_idx = rdev->raid_disk;
1824 if (disk_idx >= mddev->raid_disks
1825 || disk_idx < 0)
1826 continue;
1827 disk = conf->mirrors + disk_idx;
1828
1829 disk->rdev = rdev;
1830
1831 blk_queue_stack_limits(mddev->queue,
1832 rdev->bdev->bd_disk->queue);
 1833		/* as we don't honour merge_bvec_fn, we must never risk
 1834		 * violating it, so limit ->max_sectors to one PAGE, as
 1835		 * a one page request is never in violation.
 1836		 */
1837 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1838 mddev->queue->max_sectors > (PAGE_SIZE>>9))
1839 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1840
1841 disk->head_position = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08001842 if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 conf->working_disks++;
1844 }
1845 conf->raid_disks = mddev->raid_disks;
1846 conf->mddev = mddev;
1847 spin_lock_init(&conf->device_lock);
1848 INIT_LIST_HEAD(&conf->retry_list);
1849 if (conf->working_disks == 1)
1850 mddev->recovery_cp = MaxSector;
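	/* (with a single working mirror there is nothing to resync against,
	 * so the array is treated as fully in sync)
	 */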
1851
1852 spin_lock_init(&conf->resync_lock);
NeilBrown17999be2006-01-06 00:20:12 -08001853 init_waitqueue_head(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
NeilBrown191ea9b2005-06-21 17:17:23 -07001855 bio_list_init(&conf->pending_bio_list);
1856 bio_list_init(&conf->flushing_bio_list);
1857
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 if (!conf->working_disks) {
1859 printk(KERN_ERR "raid1: no operational mirrors for %s\n",
1860 mdname(mddev));
1861 goto out_free_conf;
1862 }
1863
1864 mddev->degraded = 0;
1865 for (i = 0; i < conf->raid_disks; i++) {
1866
1867 disk = conf->mirrors + i;
1868
1869 if (!disk->rdev) {
1870 disk->head_position = 0;
1871 mddev->degraded++;
1872 }
1873 }
1874
1875 /*
1876 * find the first working one and use it as a starting point
1877 * to read balancing.
1878 */
1879 for (j = 0; j < conf->raid_disks &&
1880 (!conf->mirrors[j].rdev ||
NeilBrownb2d444d2005-11-08 21:39:31 -08001881 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 /* nothing */;
1883 conf->last_used = j;
1884
1885
NeilBrown191ea9b2005-06-21 17:17:23 -07001886 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
1887 if (!mddev->thread) {
1888 printk(KERN_ERR
1889 "raid1: couldn't allocate thread for %s\n",
1890 mdname(mddev));
1891 goto out_free_conf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 }
NeilBrown191ea9b2005-06-21 17:17:23 -07001893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 printk(KERN_INFO
1895 "raid1: raid set %s active with %d out of %d mirrors\n",
1896 mdname(mddev), mddev->raid_disks - mddev->degraded,
1897 mddev->raid_disks);
1898 /*
1899 * Ok, everything is just fine now
1900 */
1901 mddev->array_size = mddev->size;
1902
NeilBrown7a5febe2005-05-16 21:53:16 -07001903 mddev->queue->unplug_fn = raid1_unplug;
1904 mddev->queue->issue_flush_fn = raid1_issue_flush;
1905
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 return 0;
1907
1908out_no_mem:
1909 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
1910 mdname(mddev));
1911
1912out_free_conf:
1913 if (conf) {
1914 if (conf->r1bio_pool)
1915 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001916 kfree(conf->mirrors);
NeilBrown1345b1d2006-01-06 00:20:40 -08001917 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001918 kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 kfree(conf);
1920 mddev->private = NULL;
1921 }
1922out:
1923 return -EIO;
1924}
1925
1926static int stop(mddev_t *mddev)
1927{
1928 conf_t *conf = mddev_to_conf(mddev);
NeilBrown4b6d2872005-09-09 16:23:47 -07001929 struct bitmap *bitmap = mddev->bitmap;
1930 int behind_wait = 0;
1931
1932 /* wait for behind writes to complete */
1933 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1934 behind_wait++;
1935 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
1936 set_current_state(TASK_UNINTERRUPTIBLE);
1937 schedule_timeout(HZ); /* wait a second */
1938 /* need to kick something here to make sure I/O goes? */
1939 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
1941 md_unregister_thread(mddev->thread);
1942 mddev->thread = NULL;
1943 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1944 if (conf->r1bio_pool)
1945 mempool_destroy(conf->r1bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001946	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
 1947	kfree(conf->poolinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 kfree(conf);
1949 mddev->private = NULL;
1950 return 0;
1951}
1952
1953static int raid1_resize(mddev_t *mddev, sector_t sectors)
1954{
1955 /* no resync is happening, and there is enough space
1956 * on all devices, so we can resize.
1957 * We need to make sure resync covers any new space.
1958 * If the array is shrinking we should possibly wait until
1959 * any io in the removed space completes, but it hardly seems
1960 * worth it.
1961 */
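	/* For example (figures purely illustrative): growing each member from
	 * 10GiB to 12GiB of usable space moves array_size up to 12GiB, and if
	 * the array was fully clean (recovery_cp == MaxSector) recovery_cp is
	 * pulled back to the old 10GiB mark so only the new tail is resynced.
	 */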
1962 mddev->array_size = sectors>>1;
1963 set_capacity(mddev->gendisk, mddev->array_size << 1);
1964 mddev->changed = 1;
1965 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
1966 mddev->recovery_cp = mddev->size << 1;
1967 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1968 }
1969 mddev->size = mddev->array_size;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07001970 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 return 0;
1972}
1973
1974static int raid1_reshape(mddev_t *mddev, int raid_disks)
1975{
1976 /* We need to:
1977 * 1/ resize the r1bio_pool
1978 * 2/ resize conf->mirrors
1979 *
1980 * We allocate a new r1bio_pool if we can.
1981 * Then raise a device barrier and wait until all IO stops.
1982 * Then resize conf->mirrors and swap in the new r1bio pool.
NeilBrown6ea9c072005-06-21 17:17:09 -07001983 *
1984 * At the same time, we "pack" the devices so that all the missing
1985 * devices have the higher raid_disk numbers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 */
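	/* This runs when the admin changes the number of mirrors,
	 * e.g. (illustrative) "mdadm --grow /dev/md0 --raid-devices=3",
	 * which reaches us through md's reshape hook.
	 */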
1987 mempool_t *newpool, *oldpool;
1988 struct pool_info *newpoolinfo;
1989 mirror_info_t *newmirrors;
1990 conf_t *conf = mddev_to_conf(mddev);
NeilBrown6ea9c072005-06-21 17:17:09 -07001991 int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
NeilBrown6ea9c072005-06-21 17:17:09 -07001993 int d, d2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
NeilBrown6ea9c072005-06-21 17:17:09 -07001995 if (raid_disks < conf->raid_disks) {
1996 cnt=0;
1997 for (d= 0; d < conf->raid_disks; d++)
1998 if (conf->mirrors[d].rdev)
1999 cnt++;
2000 if (cnt > raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 return -EBUSY;
NeilBrown6ea9c072005-06-21 17:17:09 -07002002 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2005 if (!newpoolinfo)
2006 return -ENOMEM;
2007 newpoolinfo->mddev = mddev;
2008 newpoolinfo->raid_disks = raid_disks;
2009
2010 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2011 r1bio_pool_free, newpoolinfo);
2012 if (!newpool) {
2013 kfree(newpoolinfo);
2014 return -ENOMEM;
2015 }
NeilBrown9ffae0c2006-01-06 00:20:32 -08002016 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 if (!newmirrors) {
2018 kfree(newpoolinfo);
2019 mempool_destroy(newpool);
2020 return -ENOMEM;
2021 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022
NeilBrown17999be2006-01-06 00:20:12 -08002023 raise_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
2025 /* ok, everything is stopped */
2026 oldpool = conf->r1bio_pool;
2027 conf->r1bio_pool = newpool;
NeilBrown6ea9c072005-06-21 17:17:09 -07002028
2029 for (d=d2=0; d < conf->raid_disks; d++)
2030 if (conf->mirrors[d].rdev) {
2031 conf->mirrors[d].rdev->raid_disk = d2;
2032 newmirrors[d2++].rdev = conf->mirrors[d].rdev;
2033 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 kfree(conf->mirrors);
2035 conf->mirrors = newmirrors;
2036 kfree(conf->poolinfo);
2037 conf->poolinfo = newpoolinfo;
2038
2039 mddev->degraded += (raid_disks - conf->raid_disks);
2040 conf->raid_disks = mddev->raid_disks = raid_disks;
2041
NeilBrown6ea9c072005-06-21 17:17:09 -07002042 conf->last_used = 0; /* just make sure it is in-range */
NeilBrown17999be2006-01-06 00:20:12 -08002043 lower_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2046 md_wakeup_thread(mddev->thread);
2047
2048 mempool_destroy(oldpool);
2049 return 0;
2050}
2051
NeilBrown500af872005-09-09 16:23:58 -07002052static void raid1_quiesce(mddev_t *mddev, int state)
NeilBrown36fa3062005-09-09 16:23:45 -07002053{
2054 conf_t *conf = mddev_to_conf(mddev);
2055
2056 switch(state) {
NeilBrown9e6603d2005-09-09 16:23:48 -07002057 case 1:
NeilBrown17999be2006-01-06 00:20:12 -08002058 raise_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002059 break;
NeilBrown9e6603d2005-09-09 16:23:48 -07002060 case 0:
NeilBrown17999be2006-01-06 00:20:12 -08002061 lower_barrier(conf);
NeilBrown36fa3062005-09-09 16:23:45 -07002062 break;
2063 }
NeilBrown36fa3062005-09-09 16:23:45 -07002064}
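/* The md core uses this hook to drain raid1 of in-flight I/O:
 * quiesce(mddev, 1) raises the resync barrier so new requests block, and
 * quiesce(mddev, 0) lowers it again, e.g. around attaching or removing a
 * write-intent bitmap on a live array.
 */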
2065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
NeilBrown2604b702006-01-06 00:20:36 -08002067static struct mdk_personality raid1_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
2069 .name = "raid1",
NeilBrown2604b702006-01-06 00:20:36 -08002070 .level = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 .owner = THIS_MODULE,
2072 .make_request = make_request,
2073 .run = run,
2074 .stop = stop,
2075 .status = status,
2076 .error_handler = error,
2077 .hot_add_disk = raid1_add_disk,
2078 .hot_remove_disk= raid1_remove_disk,
2079 .spare_active = raid1_spare_active,
2080 .sync_request = sync_request,
2081 .resize = raid1_resize,
2082 .reshape = raid1_reshape,
NeilBrown36fa3062005-09-09 16:23:45 -07002083 .quiesce = raid1_quiesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084};
2085
2086static int __init raid_init(void)
2087{
NeilBrown2604b702006-01-06 00:20:36 -08002088 return register_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089}
2090
2091static void raid_exit(void)
2092{
NeilBrown2604b702006-01-06 00:20:36 -08002093 unregister_md_personality(&raid1_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094}
2095
2096module_init(raid_init);
2097module_exit(raid_exit);
2098MODULE_LICENSE("GPL");
2099MODULE_ALIAS("md-personality-3"); /* RAID1 */
NeilBrownd9d166c2006-01-06 00:20:51 -08002100MODULE_ALIAS("md-raid1");
NeilBrown2604b702006-01-06 00:20:36 -08002101MODULE_ALIAS("md-level-1");