/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

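/*
 * Illustrative note on r1bio_pool_alloc(): r1bio_t ends in a bios[]
 * array, so offsetof(r1bio_t, bios[pi->raid_disks]) computes the size
 * of the fixed part of the struct plus one 'struct bio *' slot per
 * raid disk (e.g. two slots for a 2-disk mirror) in one allocation.
 */
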
#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

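/*
 * Worked example (assuming 4KiB pages): a RESYNC_BLOCK_SIZE of 64KiB
 * gives RESYNC_SECTORS = 64*1024 >> 9 = 128 sectors and
 * RESYNC_PAGES = 16 pages per resync buffer; other page sizes change
 * only the page count.
 */
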
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
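
/*
 * Illustrative note: during a normal resync only bios[0] carries
 * freshly allocated pages and the other bios share them, since data
 * read from one device is written unchanged to the rest.  For a
 * user-requested check/repair (MD_RECOVERY_REQUESTED) every bio gets
 * private pages so that sync_request_write() can memcmp() the copies
 * against each other.
 */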

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio, bio->bi_size,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	if (bio->bi_size)
		return 1;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate || conf->working_disks <= 1) {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		if (uptodate)
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		raid_end_bio_io(r1_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b),
			       (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	return 0;
}

static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;

	if (bio->bi_size)
		return 1;

	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, mbio->bi_size, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);

	return 0;
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
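/*
 * Sketch of the resulting policy (illustrative): with two in-sync
 * mirrors, a run of sequential reads matching conf->next_seq_sect
 * stays on conf->last_used, while a random read goes to an idle disk
 * (nr_pending == 0) if one is found, else to the disk whose
 * head_position is nearest the target sector.
 */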
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
	     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
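
/*
 * Sketch of the protocol (illustrative):
 *
 *	regular IO			resync/recovery
 *	  wait_barrier(conf);		  raise_barrier(conf);
 *	  ... submit normal IO ...	  ... submit sync IO ...
 *	  allow_barrier(conf);		  lower_barrier(conf);
 */
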
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending matches nr_queued+2
	 */
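	/* The accounting, illustratively: the failed request being
	 * handled by raid1d still holds one nr_pending reference, and
	 * this freeze adds one to barrier.  Every other in-flight
	 * request must either complete (dropping nr_pending) or park
	 * on the retry list (counted in both nr_queued and nr_pending),
	 * so the array is quiet exactly when
	 * barrier + nr_pending == nr_queued + 2.
	 */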
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
				      GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}
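
/*
 * Illustrative note: behind writes exist so that bitmap-tracked,
 * write-mostly devices can lag.  raid1_end_write_request() ACKs the
 * master bio once all non-write-mostly disks are safe, so the data is
 * copied here because the caller's pages may be reused after that
 * early ACK.
 */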

static int make_request(request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	int do_barriers;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged. This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
			  test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif
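
	/* Note: the writes merged onto pending_bio_list above are
	 * submitted later by raid1d(), which calls bitmap_unplug()
	 * first so the bitmap bits reach disk before the data writes
	 * they cover. */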

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->working_disks);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->working_disks == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		conf->working_disks--;
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	}
	clear_bit(In_sync, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	mddev->sb_dirty = 1;
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
	       "	Operation continuing on %d devices\n",
	       bdevname(rdev->bdev,b), conf->working_disks);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_bit(In_sync, &rdev->flags)) {
			conf->working_disks++;
			mddev->degraded--;
			set_bit(In_sync, &rdev->flags);
		}
	}

	print_conf(conf);
	return 0;
}


static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror=0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	if (bio->bi_size)
		return 1;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
	return 0;
}

static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	if (bio->bi_size)
		return 1;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
	return 0;
}

static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices. If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];
				for (j = vcnt; j-- ; )
					if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
						   page_address(sbio->bi_io_vec[j].bv_page),
						   PAGE_SIZE))
						break;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors. Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					/* No rcu protection needed here;
					 * devices can only be removed when
					 * no resync is active, and resync
					 * is currently active
					 */
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;
	int unplug=0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);

	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);

		if (conf->pending_bio_list.head) {
			bio = bio_list_get(&conf->pending_bio_list);
			blk_remove_plug(mddev->queue);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
			if (bitmap_unplug(mddev->bitmap) != 0)
				printk("%s: bitmap file write failed!\n", mdname(mddev));

			while (bio) { /* submit pending writes */
				struct bio *next = bio->bi_next;
				bio->bi_next = NULL;
				generic_make_request(bio);
				bio = next;
			}
			unplug = 1;

			continue;
		}

		if (list_empty(head))
			break;
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev_to_conf(mddev);
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			sync_request_write(mddev, r1_bio);
			unplug = 1;
		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			/* some requests in the r1bio were BIO_RW_BARRIER
			 * requests which failed with -EOPNOTSUPP. Hohumm..
			 * Better resubmit without the barrier.
			 * We know which devices to resubmit for, because
			 * all others have had their bios[] entry cleared.
			 * We already have a nr_pending reference on these rdevs.
			 */
			int i;
			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
			clear_bit(R1BIO_Barrier, &r1_bio->state);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i])
					atomic_inc(&r1_bio->remaining);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i]) {
					struct bio_vec *bvec;
					int j;

					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
					/* copy pages from the failed bio, as
					 * this might be a write-behind device */
					__bio_for_each_segment(bvec, bio, j, 0)
						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
					bio_put(r1_bio->bios[i]);
					bio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
					bio->bi_end_io = raid1_end_write_request;
					bio->bi_rw = WRITE;
					bio->bi_private = r1_bio;
					r1_bio->bios[i] = bio;
					generic_make_request(bio);
				}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 } else {
1453 int disk;
NeilBrownddaf22a2006-01-06 00:20:19 -08001454
1455 /* we got a read error. Maybe the drive is bad. Maybe just
1456 * the block and we can fix it.
1457 * We freeze all other IO, and try reading the block from
1458 * other devices. When we find one, we re-write
1459 * and check it that fixes the read error.
1460 * This is all done synchronously while the array is
1461 * frozen
1462 */
1463 sector_t sect = r1_bio->sector;
1464 int sectors = r1_bio->sectors;
1465 freeze_array(conf);
NeilBrowncf30a472006-01-06 00:20:23 -08001466 if (mddev->ro == 0) while(sectors) {
NeilBrownddaf22a2006-01-06 00:20:19 -08001467 int s = sectors;
1468 int d = r1_bio->read_disk;
1469 int success = 0;
1470
1471 if (s > (PAGE_SIZE>>9))
1472 s = PAGE_SIZE >> 9;
1473
				do {
					/* Note: no rcu protection needed here
					 * as this is synchronous in the raid1d thread
					 * which is the thread that might remove
					 * a device.  If raid1d ever becomes multi-threaded....
					 */
					rdev = conf->mirrors[d].rdev;
					if (rdev &&
					    test_bit(In_sync, &rdev->flags) &&
					    sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 conf->tmppage, READ))
						success = 1;
					else {
						d++;
						if (d == conf->raid_disks)
							d = 0;
					}
				} while (!success && d != r1_bio->read_disk);

				if (success) {
					/* write it back and re-read */
					int start = d;
					while (d != r1_bio->read_disk) {
						if (d == 0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, WRITE) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
						}
					}
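					/* Second pass: re-read the range from each device
					 * to verify the rewrite, counting the sectors as
					 * corrected on success.
					 */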
					d = start;
					while (d != r1_bio->read_disk) {
						if (d == 0)
							d = conf->raid_disks;
						d--;
						rdev = conf->mirrors[d].rdev;
						if (rdev &&
						    test_bit(In_sync, &rdev->flags)) {
							if (sync_page_io(rdev->bdev,
									 sect + rdev->data_offset,
									 s<<9, conf->tmppage, READ) == 0)
								/* Well, this device is dead */
								md_error(mddev, rdev);
							else {
								atomic_add(s, &rdev->corrected_errors);
								printk(KERN_INFO "raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
								       mdname(mddev), s,
								       (unsigned long long)(sect + rdev->data_offset),
								       bdevname(rdev->bdev, b));
							}
						}
					}
				} else {
					/* Cannot read from anywhere -- bye bye array */
					md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
					break;
				}
				sectors -= s;
				sect += s;
			}

			unfreeze_array(conf);

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk = read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev, b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev, b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}

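/* Pre-allocate one resync window's worth of r1buf buffers in a mempool,
 * so sync_request() can reliably obtain buffers even under memory pressure.
 */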
static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev_to_conf(mddev);
	r1bio_t *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	int sync_blocks;
	int still_degraded = 0;

	if (!conf->r1buf_pool) {
/*
		printk("sync start - bitmap %p\n", mddev->bitmap);
*/
		if (init_resync(conf))
			return 0;
	}

	max_sector = mddev->size << 1;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

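	/* With no bitmap, no requested check/repair, and no pending
	 * recovery checkpoint, every block is known to be in sync, so
	 * the whole remaining range can be skipped in one step.
	 */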
	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything.
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	raise_barrier(conf);

	conf->next_resync = sector_nr;

	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i=0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = 0;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_hw_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			still_degraded = 1;
			continue;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			bio->bi_rw = READ;
			bio->bi_end_io = end_sync_read;
			if (test_bit(WriteMostly, &rdev->flags)) {
				if (wonly < 0)
					wonly = i;
			} else {
				if (disk < 0)
					disk = i;
			}
			read_targets++;
		}
		atomic_inc(&rdev->nr_pending);
		bio->bi_sector = sector_nr + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_private = r1_bio;
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

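	/* Build the resync request a page at a time: clip each page to the
	 * device end and the current bitmap chunk, stop early if the bitmap
	 * shows no more sync is needed, and stop when a bio is full.
	 */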
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if (len > (sync_blocks<<9))
				len = sync_blocks<<9;
		}

		for (i=0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i=0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}

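/* Assemble the array: validate the requested level, build the per-mirror
 * state and mempools, count working disks, and start the raid1d thread.
 */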
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, j, disk_idx;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->level != 1) {
		printk("raid1: %s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	if (mddev->reshape_position != MaxSector) {
		printk("raid1: %s: reshape_position set but not supported\n",
		       mdname(mddev));
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out_no_mem;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out_no_mem;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out_no_mem;

	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto out_no_mem;
	conf->poolinfo->mddev = mddev;
	conf->poolinfo->raid_disks = mddev->raid_disks;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto out_no_mem;

	ITERATE_RDEV(mddev, rdev, tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		disk->head_position = 0;
		if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
			conf->working_disks++;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);
	if (conf->working_disks == 1)
		mddev->recovery_cp = MaxSector;

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	bio_list_init(&conf->flushing_bio_list);

	if (!conf->working_disks) {
		printk(KERN_ERR "raid1: no operational mirrors for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
		}
	}

	/*
	 * find the first working one and use it as a starting point
	 * for read balancing.
	 */
	for (j = 0; j < conf->raid_disks &&
		     (!conf->mirrors[j].rdev ||
		      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)); j++)
		/* nothing */;
	conf->last_used = j;

	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid1: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "raid1: raid set %s active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->array_size = mddev->size;

	mddev->queue->unplug_fn = raid1_unplug;
	mddev->queue->issue_flush_fn = raid1_issue_flush;

	return 0;

out_no_mem:
	printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
	       mdname(mddev));

out_free_conf:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
		mddev->private = NULL;
	}
out:
	return -EIO;
}

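/* Tear down the array: wait for any write-behind I/O to drain, stop the
 * raid1d thread, and free everything allocated in run().
 */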
static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	struct bitmap *bitmap = mddev->bitmap;
	int behind_wait = 0;

	/* wait for behind writes to complete */
	while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		behind_wait++;
		printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n",
		       mdname(mddev), behind_wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ); /* wait a second */
		/* need to kick something here to make sure I/O goes? */
	}

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	mddev->array_size = sectors>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = mddev->array_size;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev_to_conf(mddev);
	int cnt, raid_disks;
	int d, d2;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_size != mddev->new_chunk ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk = mddev->chunk_size;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++)
		if (conf->mirrors[d].rdev) {
			conf->mirrors[d].rdev->raid_disk = d2;
			newmirrors[d2++].rdev = conf->mirrors[d].rdev;
		}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	mddev->degraded += (raid_disks - conf->raid_disks);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

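/* Called by the md core to suspend (state 1) or resume (state 0) normal
 * I/O on the array; the resync barrier blocks new requests while raised.
 */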
static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev_to_conf(mddev);

	switch (state) {
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

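/* Operations table through which the md core drives this personality. */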
static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");