Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Based on code in raid1.c. See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
NeilBrown6cce3b22006-01-06 00:20:16 -080021#include "dm-bio-list.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/raid/raid10.h>
NeilBrown6cce3b22006-01-06 00:20:16 -080023#include <linux/raid/bitmap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25/*
26 * RAID10 provides a combination of RAID0 and RAID1 functionality.
27 * The layout of data is defined by
28 * chunk_size
29 * raid_disks
30 * near_copies (stored in low byte of layout)
31 * far_copies (stored in second byte of layout)
32 *
33 * The data to be stored is divided into chunks using chunksize.
34 * Each device is divided into far_copies sections.
35 * In each section, chunks are laid out in a style similar to raid0, but
36 * near_copies copies of each chunk are stored (each on a different drive).
37 * The starting device for each section is offset near_copies from the starting
38 * device of the previous section.
39 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a
40 * different drive.
41 * near_copies and far_copies must be at least one, and their product is at most
42 * raid_disks.
43 */
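/*
 * A worked example (illustrative only, not taken from the code): with
 * raid_disks=4, near_copies=2, far_copies=1, chunks A,B,C,D,... are
 * laid out as
 *     disk0  disk1  disk2  disk3
 *      A      A      B      B
 *      C      C      D      D
 * With near_copies=1, far_copies=2 each disk is split into two sections:
 * the first sections hold A B C D raid0-style, and the second sections
 * hold the same chunks with the starting device shifted by near_copies
 * (here one) device:
 *      D      A      B      C
 */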
44
45/*
46 * Number of guaranteed r10bios in case of extreme VM load:
47 */
48#define NR_RAID10_BIOS 256
49
50static void unplug_slaves(mddev_t *mddev);
51
NeilBrown0a27ec92006-01-06 00:20:13 -080052static void allow_barrier(conf_t *conf);
53static void lower_barrier(conf_t *conf);
54
Al Virodd0fc662005-10-07 07:46:04 +010055static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -070056{
57 conf_t *conf = data;
58 r10bio_t *r10_bio;
59 int size = offsetof(struct r10bio_s, devs[conf->copies]);
60
61 /* allocate a r10bio with room for raid_disks entries in the bios array */
62 r10_bio = kmalloc(size, gfp_flags);
63 if (r10_bio)
64 memset(r10_bio, 0, size);
65 else
66 unplug_slaves(conf->mddev);
67
68 return r10_bio;
69}
70
71static void r10bio_pool_free(void *r10_bio, void *data)
72{
73 kfree(r10_bio);
74}
75
76#define RESYNC_BLOCK_SIZE (64*1024)
77//#define RESYNC_BLOCK_SIZE PAGE_SIZE
78#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
79#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
80#define RESYNC_WINDOW (2048*1024)
81
82/*
83 * When performing a resync, we need to read and compare, so
84 * we need as many pages as there are copies.
85 * When performing a recovery, we need 2 bios, one for read,
86 * one for write (we recover only one drive per r10buf)
87 *
88 */
Al Virodd0fc662005-10-07 07:46:04 +010089static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -070090{
91 conf_t *conf = data;
92 struct page *page;
93 r10bio_t *r10_bio;
94 struct bio *bio;
95 int i, j;
96 int nalloc;
97
98 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
99 if (!r10_bio) {
100 unplug_slaves(conf->mddev);
101 return NULL;
102 }
103
104 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
105 nalloc = conf->copies; /* resync */
106 else
107 nalloc = 2; /* recovery */
108
109 /*
110 * Allocate bios.
111 */
112 for (j = nalloc ; j-- ; ) {
113 bio = bio_alloc(gfp_flags, RESYNC_PAGES);
114 if (!bio)
115 goto out_free_bio;
116 r10_bio->devs[j].bio = bio;
117 }
118 /*
119 * Allocate RESYNC_PAGES data pages and attach them
120 * where needed.
121 */
122 for (j = 0 ; j < nalloc; j++) {
123 bio = r10_bio->devs[j].bio;
124 for (i = 0; i < RESYNC_PAGES; i++) {
125 page = alloc_page(gfp_flags);
126 if (unlikely(!page))
127 goto out_free_pages;
128
129 bio->bi_io_vec[i].bv_page = page;
130 }
131 }
132
133 return r10_bio;
134
135out_free_pages:
136 for ( ; i > 0 ; i--)
137 __free_page(bio->bi_io_vec[i-1].bv_page);
138 while (j--)
139 for (i = 0; i < RESYNC_PAGES ; i++)
140 __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
141 j = -1;
142out_free_bio:
143 while ( ++j < nalloc )
144 bio_put(r10_bio->devs[j].bio);
145 r10bio_pool_free(r10_bio, conf);
146 return NULL;
147}
148
149static void r10buf_pool_free(void *__r10_bio, void *data)
150{
151 int i;
152 conf_t *conf = data;
153 r10bio_t *r10bio = __r10_bio;
154 int j;
155
156 for (j=0; j < conf->copies; j++) {
157 struct bio *bio = r10bio->devs[j].bio;
158 if (bio) {
159 for (i = 0; i < RESYNC_PAGES; i++) {
160 __free_page(bio->bi_io_vec[i].bv_page);
161 bio->bi_io_vec[i].bv_page = NULL;
162 }
163 bio_put(bio);
164 }
165 }
166 r10bio_pool_free(r10bio, conf);
167}
168
169static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
170{
171 int i;
172
173 for (i = 0; i < conf->copies; i++) {
174 struct bio **bio = & r10_bio->devs[i].bio;
175 if (*bio)
176 bio_put(*bio);
177 *bio = NULL;
178 }
179}
180
181static inline void free_r10bio(r10bio_t *r10_bio)
182{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 conf_t *conf = mddev_to_conf(r10_bio->mddev);
184
185 /*
186 * Wake up any possible resync thread that waits for the device
187 * to go idle.
188 */
NeilBrown0a27ec92006-01-06 00:20:13 -0800189 allow_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190
191 put_all_bios(conf, r10_bio);
192 mempool_free(r10_bio, conf->r10bio_pool);
193}
194
195static inline void put_buf(r10bio_t *r10_bio)
196{
197 conf_t *conf = mddev_to_conf(r10_bio->mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198
199 mempool_free(r10_bio, conf->r10buf_pool);
200
NeilBrown0a27ec92006-01-06 00:20:13 -0800201 lower_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202}
203
204static void reschedule_retry(r10bio_t *r10_bio)
205{
206 unsigned long flags;
207 mddev_t *mddev = r10_bio->mddev;
208 conf_t *conf = mddev_to_conf(mddev);
209
210 spin_lock_irqsave(&conf->device_lock, flags);
211 list_add(&r10_bio->retry_list, &conf->retry_list);
212 spin_unlock_irqrestore(&conf->device_lock, flags);
213
214 md_wakeup_thread(mddev->thread);
215}
216
217/*
218 * raid_end_bio_io() is called when we have finished servicing a mirrored
219 * operation and are ready to return a success/failure code to the buffer
220 * cache layer.
221 */
222static void raid_end_bio_io(r10bio_t *r10_bio)
223{
224 struct bio *bio = r10_bio->master_bio;
225
226 bio_endio(bio, bio->bi_size,
227 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
228 free_r10bio(r10_bio);
229}
230
231/*
232 * Update disk head position estimator based on IRQ completion info.
233 */
234static inline void update_head_pos(int slot, r10bio_t *r10_bio)
235{
236 conf_t *conf = mddev_to_conf(r10_bio->mddev);
237
238 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
239 r10_bio->devs[slot].addr + (r10_bio->sectors);
240}
241
242static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
243{
244 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
245 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
246 int slot, dev;
247 conf_t *conf = mddev_to_conf(r10_bio->mddev);
248
249 if (bio->bi_size)
250 return 1;
251
252 slot = r10_bio->read_slot;
253 dev = r10_bio->devs[slot].devnum;
254 /*
255 * this branch is our 'one mirror IO has finished' event handler:
256 */
257 if (!uptodate)
258 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
259 else
260 /*
261 * Set R10BIO_Uptodate in our master bio, so that
262 * we will return a good error code to the higher
263 * levels even if IO on some other mirrored buffer fails.
264 *
265 * The 'master' represents the composite IO operation to
266 * user-side. So if something waits for IO, then it will
267 * wait for the 'master' bio.
268 */
269 set_bit(R10BIO_Uptodate, &r10_bio->state);
270
271 update_head_pos(slot, r10_bio);
272
273 /*
274 * we have only one bio on the read side
275 */
276 if (uptodate)
277 raid_end_bio_io(r10_bio);
278 else {
279 /*
280 * oops, read error:
281 */
282 char b[BDEVNAME_SIZE];
283 if (printk_ratelimit())
284 printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
285 bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
286 reschedule_retry(r10_bio);
287 }
288
289 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
290 return 0;
291}
292
293static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
294{
295 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
296 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
297 int slot, dev;
298 conf_t *conf = mddev_to_conf(r10_bio->mddev);
299
300 if (bio->bi_size)
301 return 1;
302
303 for (slot = 0; slot < conf->copies; slot++)
304 if (r10_bio->devs[slot].bio == bio)
305 break;
306 dev = r10_bio->devs[slot].devnum;
307
308 /*
309 * this branch is our 'one mirror IO has finished' event handler:
310 */
NeilBrown6cce3b22006-01-06 00:20:16 -0800311 if (!uptodate) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
NeilBrown6cce3b22006-01-06 00:20:16 -0800313 /* an I/O failed, we can't clear the bitmap */
314 set_bit(R10BIO_Degraded, &r10_bio->state);
315 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 /*
317 * Set R10BIO_Uptodate in our master bio, so that
318 * we will return a good error code to the higher
319 * levels even if IO on some other mirrored buffer fails.
320 *
321 * The 'master' represents the composite IO operation to
322 * user-side. So if something waits for IO, then it will
323 * wait for the 'master' bio.
324 */
325 set_bit(R10BIO_Uptodate, &r10_bio->state);
326
327 update_head_pos(slot, r10_bio);
328
329 /*
330 *
331 * Let's see if all mirrored write operations have finished
332 * already.
333 */
334 if (atomic_dec_and_test(&r10_bio->remaining)) {
NeilBrown6cce3b22006-01-06 00:20:16 -0800335 /* clear the bitmap if all writes complete successfully */
336 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
337 r10_bio->sectors,
338 !test_bit(R10BIO_Degraded, &r10_bio->state),
339 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 md_write_end(r10_bio->mddev);
341 raid_end_bio_io(r10_bio);
342 }
343
344 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
345 return 0;
346}
347
348
349/*
350 * RAID10 layout manager
351 * As well as the chunk size and raid_disks count, there are two
352 * parameters: near_copies and far_copies.
353 * near_copies * far_copies must be <= raid_disks.
354 * Normally one of these will be 1.
355 * If both are 1, we get raid0.
356 * If near_copies == raid_disks, we get raid1.
357 *
358 * Chunks are laid out in raid0 style with near_copies copies of the
359 * first chunk, followed by near_copies copies of the next chunk and
360 * so on.
361 * If far_copies > 1, then after 1/far_copies of the array has been assigned
362 * as described above, we start again with a device offset of near_copies.
363 * So we effectively have another copy of the whole array further down all
364 * the drives, but with blocks on different drives.
365 * With this layout, a block is never stored twice on the same device.
366 *
367 * raid10_find_phys finds the sector offset of a given virtual sector
368 * on each device that it is on. If a block isn't on a device,
369 * that entry in the array is set to MaxSector.
370 *
371 * raid10_find_virt does the reverse mapping, from a device and a
372 * sector offset to a virtual address
373 */
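/*
 * Illustrative mapping (assumed geometry, for orientation only): with a
 * 64K chunk (128 sectors, chunk_shift=7), raid_disks=4, near_copies=2,
 * far_copies=1, virtual sector 300 lies in virtual chunk 2 at offset 44.
 * chunk*near_copies = 4 gives stripe 1, device 0, so raid10_find_phys()
 * records physical sector 172 (128+44) on devices 0 and 1, and
 * raid10_find_virt(conf, 172, 0) maps back to virtual sector 300.
 */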
374
375static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
376{
377 int n,f;
378 sector_t sector;
379 sector_t chunk;
380 sector_t stripe;
381 int dev;
382
383 int slot = 0;
384
385 /* now calculate first sector/dev */
386 chunk = r10bio->sector >> conf->chunk_shift;
387 sector = r10bio->sector & conf->chunk_mask;
388
389 chunk *= conf->near_copies;
390 stripe = chunk;
391 dev = sector_div(stripe, conf->raid_disks);
392
393 sector += stripe << conf->chunk_shift;
394
395 /* and calculate all the others */
396 for (n=0; n < conf->near_copies; n++) {
397 int d = dev;
398 sector_t s = sector;
399 r10bio->devs[slot].addr = sector;
400 r10bio->devs[slot].devnum = d;
401 slot++;
402
403 for (f = 1; f < conf->far_copies; f++) {
404 d += conf->near_copies;
405 if (d >= conf->raid_disks)
406 d -= conf->raid_disks;
407 s += conf->stride;
408 r10bio->devs[slot].devnum = d;
409 r10bio->devs[slot].addr = s;
410 slot++;
411 }
412 dev++;
413 if (dev >= conf->raid_disks) {
414 dev = 0;
415 sector += (conf->chunk_mask + 1);
416 }
417 }
418 BUG_ON(slot != conf->copies);
419}
420
421static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
422{
423 sector_t offset, chunk, vchunk;
424
425 while (sector > conf->stride) {
426 sector -= conf->stride;
427 if (dev < conf->near_copies)
428 dev += conf->raid_disks - conf->near_copies;
429 else
430 dev -= conf->near_copies;
431 }
432
433 offset = sector & conf->chunk_mask;
434 chunk = sector >> conf->chunk_shift;
435 vchunk = chunk * conf->raid_disks + dev;
436 sector_div(vchunk, conf->near_copies);
437 return (vchunk << conf->chunk_shift) + offset;
438}
439
440/**
441 * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
442 * @q: request queue
443 * @bio: the buffer head that's been built up so far
444 * @biovec: the request that could be merged to it.
445 *
446 * Return amount of bytes we can accept at this offset
447 * If near_copies == raid_disks, there are no striping issues,
448 * but in that case, the function isn't called at all.
449 */
450static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
451 struct bio_vec *bio_vec)
452{
453 mddev_t *mddev = q->queuedata;
454 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
455 int max;
456 unsigned int chunk_sectors = mddev->chunk_size >> 9;
457 unsigned int bio_sectors = bio->bi_size >> 9;
458
459 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
460 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
461 if (max <= bio_vec->bv_len && bio_sectors == 0)
462 return bio_vec->bv_len;
463 else
464 return max;
465}
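/*
 * Worked example of the arithmetic above (hypothetical values): with a
 * 64K chunk (chunk_sectors=128), a bio that currently starts at offset
 * 100 within its chunk and already holds 20 sectors gets
 * max = (128 - (100 + 20)) << 9 = 4096, i.e. up to 4K more may be added
 * before the request would cross the chunk boundary.  A negative result
 * is clamped to 0, except that an empty bio is always allowed to accept
 * at least one full bio_vec.
 */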
466
467/*
468 * This routine returns the disk from which the requested read should
469 * be done. There is a per-array 'next expected sequential IO' sector
470 * number - if this matches on the next IO then we use the last disk.
471 * There is also a per-disk 'last known head position' sector that is
472 * maintained from IRQ contexts, both the normal and the resync IO
473 * completion handlers update this position correctly. If there is no
474 * perfect sequential match then we pick the disk whose head is closest.
475 *
476 * If there are 2 mirrors in the same 2 devices, performance degrades
477 * because the head position is tracked per mirror, not per device.
478 *
479 * The rdev for the device selected will have nr_pending incremented.
480 */
481
482/*
483 * FIXME: possibly should rethink readbalancing and do it differently
484 * depending on near_copies / far_copies geometry.
485 */
486static int read_balance(conf_t *conf, r10bio_t *r10_bio)
487{
488 const unsigned long this_sector = r10_bio->sector;
489 int disk, slot, nslot;
490 const int sectors = r10_bio->sectors;
491 sector_t new_distance, current_distance;
Suzanne Woodd6065f72005-11-08 21:39:27 -0800492 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493
494 raid10_find_phys(conf, r10_bio);
495 rcu_read_lock();
496 /*
497 * Check if we can balance. We can balance on the whole
NeilBrown6cce3b22006-01-06 00:20:16 -0800498 * device if no resync is going on (recovery is ok), or below
499 * the resync window. We take the first readable disk when
500 * above the resync window.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 */
502 if (conf->mddev->recovery_cp < MaxSector
503 && (this_sector + sectors >= conf->next_resync)) {
504 /* make sure that disk is operational */
505 slot = 0;
506 disk = r10_bio->devs[slot].devnum;
507
Suzanne Woodd6065f72005-11-08 21:39:27 -0800508 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
NeilBrownb2d444d2005-11-08 21:39:31 -0800509 !test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 slot++;
511 if (slot == conf->copies) {
512 slot = 0;
513 disk = -1;
514 break;
515 }
516 disk = r10_bio->devs[slot].devnum;
517 }
518 goto rb_out;
519 }
520
521
522 /* make sure the disk is operational */
523 slot = 0;
524 disk = r10_bio->devs[slot].devnum;
Suzanne Woodd6065f72005-11-08 21:39:27 -0800525 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
NeilBrownb2d444d2005-11-08 21:39:31 -0800526 !test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 slot ++;
528 if (slot == conf->copies) {
529 disk = -1;
530 goto rb_out;
531 }
532 disk = r10_bio->devs[slot].devnum;
533 }
534
535
NeilBrown3ec67ac2005-09-09 16:23:40 -0700536 current_distance = abs(r10_bio->devs[slot].addr -
537 conf->mirrors[disk].head_position);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538
539 /* Find the disk whose head is closest */
540
541 for (nslot = slot; nslot < conf->copies; nslot++) {
542 int ndisk = r10_bio->devs[nslot].devnum;
543
544
Suzanne Woodd6065f72005-11-08 21:39:27 -0800545 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
NeilBrownb2d444d2005-11-08 21:39:31 -0800546 !test_bit(In_sync, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 continue;
548
NeilBrown22dfdf52005-11-28 13:44:09 -0800549 /* This optimisation is debatable, and completely destroys
550 * sequential read speed for 'far copies' arrays. So only
551 * keep it for 'near' arrays, and review those later.
552 */
553 if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 disk = ndisk;
555 slot = nslot;
556 break;
557 }
558 new_distance = abs(r10_bio->devs[nslot].addr -
559 conf->mirrors[ndisk].head_position);
560 if (new_distance < current_distance) {
561 current_distance = new_distance;
562 disk = ndisk;
563 slot = nslot;
564 }
565 }
566
567rb_out:
568 r10_bio->read_slot = slot;
569/* conf->next_seq_sect = this_sector + sectors;*/
570
Suzanne Woodd6065f72005-11-08 21:39:27 -0800571 if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
573 rcu_read_unlock();
574
575 return disk;
576}
577
578static void unplug_slaves(mddev_t *mddev)
579{
580 conf_t *conf = mddev_to_conf(mddev);
581 int i;
582
583 rcu_read_lock();
584 for (i=0; i<mddev->raid_disks; i++) {
Suzanne Woodd6065f72005-11-08 21:39:27 -0800585 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
NeilBrownb2d444d2005-11-08 21:39:31 -0800586 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
588
589 atomic_inc(&rdev->nr_pending);
590 rcu_read_unlock();
591
592 if (r_queue->unplug_fn)
593 r_queue->unplug_fn(r_queue);
594
595 rdev_dec_pending(rdev, mddev);
596 rcu_read_lock();
597 }
598 }
599 rcu_read_unlock();
600}
601
602static void raid10_unplug(request_queue_t *q)
603{
NeilBrown6cce3b22006-01-06 00:20:16 -0800604 mddev_t *mddev = q->queuedata;
605
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 unplug_slaves(q->queuedata);
NeilBrown6cce3b22006-01-06 00:20:16 -0800607 md_wakeup_thread(mddev->thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608}
609
610static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
611 sector_t *error_sector)
612{
613 mddev_t *mddev = q->queuedata;
614 conf_t *conf = mddev_to_conf(mddev);
615 int i, ret = 0;
616
617 rcu_read_lock();
618 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
Suzanne Woodd6065f72005-11-08 21:39:27 -0800619 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
NeilBrownb2d444d2005-11-08 21:39:31 -0800620 if (rdev && !test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 struct block_device *bdev = rdev->bdev;
622 request_queue_t *r_queue = bdev_get_queue(bdev);
623
624 if (!r_queue->issue_flush_fn)
625 ret = -EOPNOTSUPP;
626 else {
627 atomic_inc(&rdev->nr_pending);
628 rcu_read_unlock();
629 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
630 error_sector);
631 rdev_dec_pending(rdev, mddev);
632 rcu_read_lock();
633 }
634 }
635 }
636 rcu_read_unlock();
637 return ret;
638}
639
NeilBrown0a27ec92006-01-06 00:20:13 -0800640/* Barriers....
641 * Sometimes we need to suspend IO while we do something else,
642 * either some resync/recovery, or reconfigure the array.
643 * To do this we raise a 'barrier'.
644 * The 'barrier' is a counter that can be raised multiple times
645 * to count how many activities are happening which preclude
646 * normal IO.
647 * We can only raise the barrier if there is no pending IO.
648 * i.e. if nr_pending == 0.
649 * We choose only to raise the barrier if no-one is waiting for the
650 * barrier to go down. This means that as soon as an IO request
651 * is ready, no other operations which require a barrier will start
652 * until the IO request has had a chance.
653 *
654 * So: regular IO calls 'wait_barrier'. When that returns there
655 * is no background IO happening. It must arrange to call
656 * allow_barrier when it has finished its IO.
657 * background IO calls must call raise_barrier. Once that returns
658 * there is no normal IO happening. It must arrange to call
659 * lower_barrier when the particular background IO completes.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 */
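/*
 * Rough summary of how the pairs are used in this file (sketch only):
 *
 *   regular IO path                   resync/recovery path
 *   ---------------                   --------------------
 *   make_request():                   sync_request():
 *       wait_barrier(conf);               raise_barrier(conf, force);
 *       ... submit bios ...               ... issue sync/recovery IO ...
 *   free_r10bio() on completion:      put_buf() on completion:
 *       allow_barrier(conf);              lower_barrier(conf);
 */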
661#define RESYNC_DEPTH 32
662
NeilBrown6cce3b22006-01-06 00:20:16 -0800663static void raise_barrier(conf_t *conf, int force)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664{
NeilBrown6cce3b22006-01-06 00:20:16 -0800665 BUG_ON(force && !conf->barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 spin_lock_irq(&conf->resync_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667
NeilBrown6cce3b22006-01-06 00:20:16 -0800668 /* Wait until no block IO is waiting (unless 'force') */
669 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
NeilBrown0a27ec92006-01-06 00:20:13 -0800670 conf->resync_lock,
671 raid10_unplug(conf->mddev->queue));
672
673 /* block any new IO from starting */
674 conf->barrier++;
675
676 /* Now wait for all pending IO to complete */
677 wait_event_lock_irq(conf->wait_barrier,
678 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
679 conf->resync_lock,
680 raid10_unplug(conf->mddev->queue));
681
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 spin_unlock_irq(&conf->resync_lock);
683}
684
NeilBrown0a27ec92006-01-06 00:20:13 -0800685static void lower_barrier(conf_t *conf)
686{
687 unsigned long flags;
688 spin_lock_irqsave(&conf->resync_lock, flags);
689 conf->barrier--;
690 spin_unlock_irqrestore(&conf->resync_lock, flags);
691 wake_up(&conf->wait_barrier);
692}
693
694static void wait_barrier(conf_t *conf)
695{
696 spin_lock_irq(&conf->resync_lock);
697 if (conf->barrier) {
698 conf->nr_waiting++;
699 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
700 conf->resync_lock,
701 raid10_unplug(conf->mddev->queue));
702 conf->nr_waiting--;
703 }
704 conf->nr_pending++;
705 spin_unlock_irq(&conf->resync_lock);
706}
707
708static void allow_barrier(conf_t *conf)
709{
710 unsigned long flags;
711 spin_lock_irqsave(&conf->resync_lock, flags);
712 conf->nr_pending--;
713 spin_unlock_irqrestore(&conf->resync_lock, flags);
714 wake_up(&conf->wait_barrier);
715}
716
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717static int make_request(request_queue_t *q, struct bio * bio)
718{
719 mddev_t *mddev = q->queuedata;
720 conf_t *conf = mddev_to_conf(mddev);
721 mirror_info_t *mirror;
722 r10bio_t *r10_bio;
723 struct bio *read_bio;
724 int i;
725 int chunk_sects = conf->chunk_mask + 1;
Jens Axboea3623572005-11-01 09:26:16 +0100726 const int rw = bio_data_dir(bio);
NeilBrown6cce3b22006-01-06 00:20:16 -0800727 struct bio_list bl;
728 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729
NeilBrowne5dcdd82005-09-09 16:23:41 -0700730 if (unlikely(bio_barrier(bio))) {
731 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
732 return 0;
733 }
734
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 /* If this request crosses a chunk boundary, we need to
736 * split it. This will only happen for 1 PAGE (or less) requests.
737 */
738 if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
739 > chunk_sects &&
740 conf->near_copies < conf->raid_disks)) {
741 struct bio_pair *bp;
742 /* Sanity check -- queue functions should prevent this happening */
743 if (bio->bi_vcnt != 1 ||
744 bio->bi_idx != 0)
745 goto bad_map;
746 /* This is a one page bio that upper layers
747 * refuse to split for us, so we need to split it.
748 */
749 bp = bio_split(bio, bio_split_pool,
750 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
751 if (make_request(q, &bp->bio1))
752 generic_make_request(&bp->bio1);
753 if (make_request(q, &bp->bio2))
754 generic_make_request(&bp->bio2);
755
756 bio_pair_release(bp);
757 return 0;
758 bad_map:
759 printk("raid10_make_request bug: can't convert block across chunks"
760 " or bigger than %dk %llu %d\n", chunk_sects/2,
761 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
762
763 bio_io_error(bio, bio->bi_size);
764 return 0;
765 }
766
NeilBrown3d310eb2005-06-21 17:17:26 -0700767 md_write_start(mddev, bio);
NeilBrown06d91a52005-06-21 17:17:12 -0700768
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 /*
770 * Register the new request and wait if the reconstruction
771 * thread has put up a bar for new requests.
772 * Continue immediately if no resync is active currently.
773 */
NeilBrown0a27ec92006-01-06 00:20:13 -0800774 wait_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
Jens Axboea3623572005-11-01 09:26:16 +0100776 disk_stat_inc(mddev->gendisk, ios[rw]);
777 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
779 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
780
781 r10_bio->master_bio = bio;
782 r10_bio->sectors = bio->bi_size >> 9;
783
784 r10_bio->mddev = mddev;
785 r10_bio->sector = bio->bi_sector;
NeilBrown6cce3b22006-01-06 00:20:16 -0800786 r10_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787
Jens Axboea3623572005-11-01 09:26:16 +0100788 if (rw == READ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 /*
790 * read balancing logic:
791 */
792 int disk = read_balance(conf, r10_bio);
793 int slot = r10_bio->read_slot;
794 if (disk < 0) {
795 raid_end_bio_io(r10_bio);
796 return 0;
797 }
798 mirror = conf->mirrors + disk;
799
800 read_bio = bio_clone(bio, GFP_NOIO);
801
802 r10_bio->devs[slot].bio = read_bio;
803
804 read_bio->bi_sector = r10_bio->devs[slot].addr +
805 mirror->rdev->data_offset;
806 read_bio->bi_bdev = mirror->rdev->bdev;
807 read_bio->bi_end_io = raid10_end_read_request;
808 read_bio->bi_rw = READ;
809 read_bio->bi_private = r10_bio;
810
811 generic_make_request(read_bio);
812 return 0;
813 }
814
815 /*
816 * WRITE:
817 */
818 /* first select target devices under spinlock and
819 * inc refcount on their rdev. Record them by setting
820 * bios[x] to bio
821 */
822 raid10_find_phys(conf, r10_bio);
823 rcu_read_lock();
824 for (i = 0; i < conf->copies; i++) {
825 int d = r10_bio->devs[i].devnum;
Suzanne Woodd6065f72005-11-08 21:39:27 -0800826 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
827 if (rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -0800828 !test_bit(Faulty, &rdev->flags)) {
Suzanne Woodd6065f72005-11-08 21:39:27 -0800829 atomic_inc(&rdev->nr_pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 r10_bio->devs[i].bio = bio;
NeilBrown6cce3b22006-01-06 00:20:16 -0800831 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 r10_bio->devs[i].bio = NULL;
NeilBrown6cce3b22006-01-06 00:20:16 -0800833 set_bit(R10BIO_Degraded, &r10_bio->state);
834 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 }
836 rcu_read_unlock();
837
NeilBrown6cce3b22006-01-06 00:20:16 -0800838 atomic_set(&r10_bio->remaining, 0);
NeilBrown06d91a52005-06-21 17:17:12 -0700839
NeilBrown6cce3b22006-01-06 00:20:16 -0800840 bio_list_init(&bl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 for (i = 0; i < conf->copies; i++) {
842 struct bio *mbio;
843 int d = r10_bio->devs[i].devnum;
844 if (!r10_bio->devs[i].bio)
845 continue;
846
847 mbio = bio_clone(bio, GFP_NOIO);
848 r10_bio->devs[i].bio = mbio;
849
850 mbio->bi_sector = r10_bio->devs[i].addr+
851 conf->mirrors[d].rdev->data_offset;
852 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
853 mbio->bi_end_io = raid10_end_write_request;
854 mbio->bi_rw = WRITE;
855 mbio->bi_private = r10_bio;
856
857 atomic_inc(&r10_bio->remaining);
NeilBrown6cce3b22006-01-06 00:20:16 -0800858 bio_list_add(&bl, mbio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 }
860
NeilBrown6cce3b22006-01-06 00:20:16 -0800861 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
862 spin_lock_irqsave(&conf->device_lock, flags);
863 bio_list_merge(&conf->pending_bio_list, &bl);
864 blk_plug_device(mddev->queue);
865 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866
867 return 0;
868}
869
870static void status(struct seq_file *seq, mddev_t *mddev)
871{
872 conf_t *conf = mddev_to_conf(mddev);
873 int i;
874
875 if (conf->near_copies < conf->raid_disks)
876 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
877 if (conf->near_copies > 1)
878 seq_printf(seq, " %d near-copies", conf->near_copies);
879 if (conf->far_copies > 1)
880 seq_printf(seq, " %d far-copies", conf->far_copies);
881
882 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
883 conf->working_disks);
884 for (i = 0; i < conf->raid_disks; i++)
885 seq_printf(seq, "%s",
886 conf->mirrors[i].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -0800887 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888 seq_printf(seq, "]");
889}
890
891static void error(mddev_t *mddev, mdk_rdev_t *rdev)
892{
893 char b[BDEVNAME_SIZE];
894 conf_t *conf = mddev_to_conf(mddev);
895
896 /*
897 * If it is not operational, then we have already marked it as dead
898 * else if it is the last working disk, ignore the error, let the
899 * next level up know.
900 * else mark the drive as failed
901 */
NeilBrownb2d444d2005-11-08 21:39:31 -0800902 if (test_bit(In_sync, &rdev->flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 && conf->working_disks == 1)
904 /*
905 * Don't fail the drive, just return an IO error.
906 * The test should really be more sophisticated than
907 * "working_disks == 1", but it isn't critical, and
908 * can wait until we do more sophisticated "is the drive
909 * really dead" tests...
910 */
911 return;
NeilBrownb2d444d2005-11-08 21:39:31 -0800912 if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 mddev->degraded++;
914 conf->working_disks--;
915 /*
916 * if recovery is running, make sure it aborts.
917 */
918 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
919 }
NeilBrownb2d444d2005-11-08 21:39:31 -0800920 clear_bit(In_sync, &rdev->flags);
921 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922 mddev->sb_dirty = 1;
923 printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. \n"
924 " Operation continuing on %d devices\n",
925 bdevname(rdev->bdev,b), conf->working_disks);
926}
927
928static void print_conf(conf_t *conf)
929{
930 int i;
931 mirror_info_t *tmp;
932
933 printk("RAID10 conf printout:\n");
934 if (!conf) {
935 printk("(!conf)\n");
936 return;
937 }
938 printk(" --- wd:%d rd:%d\n", conf->working_disks,
939 conf->raid_disks);
940
941 for (i = 0; i < conf->raid_disks; i++) {
942 char b[BDEVNAME_SIZE];
943 tmp = conf->mirrors + i;
944 if (tmp->rdev)
945 printk(" disk %d, wo:%d, o:%d, dev:%s\n",
NeilBrownb2d444d2005-11-08 21:39:31 -0800946 i, !test_bit(In_sync, &tmp->rdev->flags),
947 !test_bit(Faulty, &tmp->rdev->flags),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 bdevname(tmp->rdev->bdev,b));
949 }
950}
951
952static void close_sync(conf_t *conf)
953{
NeilBrown0a27ec92006-01-06 00:20:13 -0800954 wait_barrier(conf);
955 allow_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956
957 mempool_destroy(conf->r10buf_pool);
958 conf->r10buf_pool = NULL;
959}
960
NeilBrown6d508242005-09-09 16:24:03 -0700961/* check if there are enough drives for
962 * every block to appear on at least one device
963 */
964static int enough(conf_t *conf)
965{
966 int first = 0;
967
968 do {
969 int n = conf->copies;
970 int cnt = 0;
971 while (n--) {
972 if (conf->mirrors[first].rdev)
973 cnt++;
974 first = (first+1) % conf->raid_disks;
975 }
976 if (cnt == 0)
977 return 0;
978 } while (first != 0);
979 return 1;
980}
981
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982static int raid10_spare_active(mddev_t *mddev)
983{
984 int i;
985 conf_t *conf = mddev->private;
986 mirror_info_t *tmp;
987
988 /*
989 * Find all non-in_sync disks within the RAID10 configuration
990 * and mark them in_sync
991 */
992 for (i = 0; i < conf->raid_disks; i++) {
993 tmp = conf->mirrors + i;
994 if (tmp->rdev
NeilBrownb2d444d2005-11-08 21:39:31 -0800995 && !test_bit(Faulty, &tmp->rdev->flags)
996 && !test_bit(In_sync, &tmp->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 conf->working_disks++;
998 mddev->degraded--;
NeilBrownb2d444d2005-11-08 21:39:31 -0800999 set_bit(In_sync, &tmp->rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 }
1001 }
1002
1003 print_conf(conf);
1004 return 0;
1005}
1006
1007
1008static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1009{
1010 conf_t *conf = mddev->private;
1011 int found = 0;
1012 int mirror;
1013 mirror_info_t *p;
1014
1015 if (mddev->recovery_cp < MaxSector)
1016 /* only hot-add to in-sync arrays, as recovery is
1017 * very different from resync
1018 */
1019 return 0;
NeilBrown6d508242005-09-09 16:24:03 -07001020 if (!enough(conf))
1021 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
NeilBrown6cce3b22006-01-06 00:20:16 -08001023 if (rdev->saved_raid_disk >= 0 &&
1024 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1025 mirror = rdev->saved_raid_disk;
1026 else
1027 mirror = 0;
1028 for ( ; mirror < mddev->raid_disks; mirror++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 if ( !(p=conf->mirrors+mirror)->rdev) {
1030
1031 blk_queue_stack_limits(mddev->queue,
1032 rdev->bdev->bd_disk->queue);
1033 /* as we don't honour merge_bvec_fn, we must never risk
1034 * violating it, so limit ->max_sector to one PAGE, as
1035 * a one page request is never in violation.
1036 */
1037 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1038 mddev->queue->max_sectors > (PAGE_SIZE>>9))
1039 mddev->queue->max_sectors = (PAGE_SIZE>>9);
1040
1041 p->head_position = 0;
1042 rdev->raid_disk = mirror;
1043 found = 1;
NeilBrown6cce3b22006-01-06 00:20:16 -08001044 if (rdev->saved_raid_disk != mirror)
1045 conf->fullsync = 1;
Suzanne Woodd6065f72005-11-08 21:39:27 -08001046 rcu_assign_pointer(p->rdev, rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 break;
1048 }
1049
1050 print_conf(conf);
1051 return found;
1052}
1053
1054static int raid10_remove_disk(mddev_t *mddev, int number)
1055{
1056 conf_t *conf = mddev->private;
1057 int err = 0;
1058 mdk_rdev_t *rdev;
1059 mirror_info_t *p = conf->mirrors+ number;
1060
1061 print_conf(conf);
1062 rdev = p->rdev;
1063 if (rdev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001064 if (test_bit(In_sync, &rdev->flags) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 atomic_read(&rdev->nr_pending)) {
1066 err = -EBUSY;
1067 goto abort;
1068 }
1069 p->rdev = NULL;
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07001070 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 if (atomic_read(&rdev->nr_pending)) {
1072 /* lost the race, try later */
1073 err = -EBUSY;
1074 p->rdev = rdev;
1075 }
1076 }
1077abort:
1078
1079 print_conf(conf);
1080 return err;
1081}
1082
1083
1084static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1085{
1086 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1087 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1088 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1089 int i,d;
1090
1091 if (bio->bi_size)
1092 return 1;
1093
1094 for (i=0; i<conf->copies; i++)
1095 if (r10_bio->devs[i].bio == bio)
1096 break;
1097 if (i == conf->copies)
1098 BUG();
1099 update_head_pos(i, r10_bio);
1100 d = r10_bio->devs[i].devnum;
1101 if (!uptodate)
1102 md_error(r10_bio->mddev,
1103 conf->mirrors[d].rdev);
1104
1105 /* for reconstruct, we always reschedule after a read.
1106 * for resync, only after all reads
1107 */
1108 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1109 atomic_dec_and_test(&r10_bio->remaining)) {
1110 /* we have read all the blocks,
1111 * do the comparison in process context in raid10d
1112 */
1113 reschedule_retry(r10_bio);
1114 }
1115 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1116 return 0;
1117}
1118
1119static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1120{
1121 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1122 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1123 mddev_t *mddev = r10_bio->mddev;
1124 conf_t *conf = mddev_to_conf(mddev);
1125 int i,d;
1126
1127 if (bio->bi_size)
1128 return 1;
1129
1130 for (i = 0; i < conf->copies; i++)
1131 if (r10_bio->devs[i].bio == bio)
1132 break;
1133 d = r10_bio->devs[i].devnum;
1134
1135 if (!uptodate)
1136 md_error(mddev, conf->mirrors[d].rdev);
1137 update_head_pos(i, r10_bio);
1138
1139 while (atomic_dec_and_test(&r10_bio->remaining)) {
1140 if (r10_bio->master_bio == NULL) {
1141 /* the primary of several recovery bios */
1142 md_done_sync(mddev, r10_bio->sectors, 1);
1143 put_buf(r10_bio);
1144 break;
1145 } else {
1146 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1147 put_buf(r10_bio);
1148 r10_bio = r10_bio2;
1149 }
1150 }
1151 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1152 return 0;
1153}
1154
1155/*
1156 * Note: sync and recovery are handled very differently for raid10.
1157 * This code is for resync.
1158 * For resync, we read through virtual addresses and read all blocks.
1159 * If there is any error, we schedule a write. The lowest numbered
1160 * drive is authoritative.
1161 * However requests come in for physical addresses, so we need to map.
1162 * For every physical address there are raid_disks/copies virtual addresses,
1163 * which is always at least one, but is not necessarily an integer.
1164 * This means that a physical address can span multiple chunks, so we may
1165 * have to submit multiple io requests for a single sync request.
1166 */
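/*
 * For example (illustrative numbers): with raid_disks=3 and near_copies=2,
 * raid_disks/copies = 1.5, so virtual chunks do not line up with physical
 * chunks on a device and a single sync request may have to be split into
 * several IO requests.
 */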
1167/*
1168 * We check if all blocks are in-sync and only write to blocks that
1169 * aren't in sync
1170 */
1171static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1172{
1173 conf_t *conf = mddev_to_conf(mddev);
1174 int i, first;
1175 struct bio *tbio, *fbio;
1176
1177 atomic_set(&r10_bio->remaining, 1);
1178
1179 /* find the first device with a block */
1180 for (i=0; i<conf->copies; i++)
1181 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1182 break;
1183
1184 if (i == conf->copies)
1185 goto done;
1186
1187 first = i;
1188 fbio = r10_bio->devs[i].bio;
1189
1190 /* now find blocks with errors */
1191 for (i=first+1 ; i < conf->copies ; i++) {
1192 int vcnt, j, d;
1193
1194 if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1195 continue;
1196 /* We know that the bi_io_vec layout is the same for
1197 * both 'first' and 'i', so we just compare them.
1198 * All vec entries are PAGE_SIZE;
1199 */
1200 tbio = r10_bio->devs[i].bio;
1201 vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1202 for (j = 0; j < vcnt; j++)
1203 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1204 page_address(tbio->bi_io_vec[j].bv_page),
1205 PAGE_SIZE))
1206 break;
1207 if (j == vcnt)
1208 continue;
1209 /* Ok, we need to write this bio
1210 * First we need to fixup bv_offset, bv_len and
1211 * bi_vecs, as the read request might have corrupted these
1212 */
1213 tbio->bi_vcnt = vcnt;
1214 tbio->bi_size = r10_bio->sectors << 9;
1215 tbio->bi_idx = 0;
1216 tbio->bi_phys_segments = 0;
1217 tbio->bi_hw_segments = 0;
1218 tbio->bi_hw_front_size = 0;
1219 tbio->bi_hw_back_size = 0;
1220 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1221 tbio->bi_flags |= 1 << BIO_UPTODATE;
1222 tbio->bi_next = NULL;
1223 tbio->bi_rw = WRITE;
1224 tbio->bi_private = r10_bio;
1225 tbio->bi_sector = r10_bio->devs[i].addr;
1226
1227 for (j=0; j < vcnt ; j++) {
1228 tbio->bi_io_vec[j].bv_offset = 0;
1229 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1230
1231 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1232 page_address(fbio->bi_io_vec[j].bv_page),
1233 PAGE_SIZE);
1234 }
1235 tbio->bi_end_io = end_sync_write;
1236
1237 d = r10_bio->devs[i].devnum;
1238 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1239 atomic_inc(&r10_bio->remaining);
1240 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1241
1242 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1243 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1244 generic_make_request(tbio);
1245 }
1246
1247done:
1248 if (atomic_dec_and_test(&r10_bio->remaining)) {
1249 md_done_sync(mddev, r10_bio->sectors, 1);
1250 put_buf(r10_bio);
1251 }
1252}
1253
1254/*
1255 * Now for the recovery code.
1256 * Recovery happens across physical sectors.
1257 * We recover all non-in_sync drives by finding the virtual address of
1258 * each, and then choose a working drive that also has that virt address.
1259 * There is a separate r10_bio for each non-in_sync drive.
1260 * Only the first two slots are in use. The first for reading,
1261 * The second for writing.
1262 *
1263 */
1264
1265static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1266{
1267 conf_t *conf = mddev_to_conf(mddev);
1268 int i, d;
1269 struct bio *bio, *wbio;
1270
1271
1272 /* move the pages across to the second bio
1273 * and submit the write request
1274 */
1275 bio = r10_bio->devs[0].bio;
1276 wbio = r10_bio->devs[1].bio;
1277 for (i=0; i < wbio->bi_vcnt; i++) {
1278 struct page *p = bio->bi_io_vec[i].bv_page;
1279 bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1280 wbio->bi_io_vec[i].bv_page = p;
1281 }
1282 d = r10_bio->devs[1].devnum;
1283
1284 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1285 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1286 generic_make_request(wbio);
1287}
1288
1289
1290/*
1291 * This is a kernel thread which:
1292 *
1293 * 1. Retries failed read operations on working mirrors.
1294 * 2. Updates the raid superblock when problems are encountered.
1295 * 3. Performs writes following reads for array synchronising.
1296 */
1297
1298static void raid10d(mddev_t *mddev)
1299{
1300 r10bio_t *r10_bio;
1301 struct bio *bio;
1302 unsigned long flags;
1303 conf_t *conf = mddev_to_conf(mddev);
1304 struct list_head *head = &conf->retry_list;
1305 int unplug=0;
1306 mdk_rdev_t *rdev;
1307
1308 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
1310 for (;;) {
1311 char b[BDEVNAME_SIZE];
1312 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrown6cce3b22006-01-06 00:20:16 -08001313
1314 if (conf->pending_bio_list.head) {
1315 bio = bio_list_get(&conf->pending_bio_list);
1316 blk_remove_plug(mddev->queue);
1317 spin_unlock_irqrestore(&conf->device_lock, flags);
1318 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1319 if (bitmap_unplug(mddev->bitmap) != 0)
1320 printk("%s: bitmap file write failed!\n", mdname(mddev));
1321
1322 while (bio) { /* submit pending writes */
1323 struct bio *next = bio->bi_next;
1324 bio->bi_next = NULL;
1325 generic_make_request(bio);
1326 bio = next;
1327 }
1328 unplug = 1;
1329
1330 continue;
1331 }
1332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 if (list_empty(head))
1334 break;
1335 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1336 list_del(head->prev);
1337 spin_unlock_irqrestore(&conf->device_lock, flags);
1338
1339 mddev = r10_bio->mddev;
1340 conf = mddev_to_conf(mddev);
1341 if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1342 sync_request_write(mddev, r10_bio);
1343 unplug = 1;
1344 } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1345 recovery_request_write(mddev, r10_bio);
1346 unplug = 1;
1347 } else {
1348 int mirror;
1349 bio = r10_bio->devs[r10_bio->read_slot].bio;
1350 r10_bio->devs[r10_bio->read_slot].bio = NULL;
1351 bio_put(bio);
1352 mirror = read_balance(conf, r10_bio);
1353 if (mirror == -1) {
1354 printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1355 " read error for block %llu\n",
1356 bdevname(bio->bi_bdev,b),
1357 (unsigned long long)r10_bio->sector);
1358 raid_end_bio_io(r10_bio);
1359 } else {
1360 rdev = conf->mirrors[mirror].rdev;
1361 if (printk_ratelimit())
1362 printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1363 " another mirror\n",
1364 bdevname(rdev->bdev,b),
1365 (unsigned long long)r10_bio->sector);
1366 bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1367 r10_bio->devs[r10_bio->read_slot].bio = bio;
1368 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1369 + rdev->data_offset;
1370 bio->bi_bdev = rdev->bdev;
1371 bio->bi_rw = READ;
1372 bio->bi_private = r10_bio;
1373 bio->bi_end_io = raid10_end_read_request;
1374 unplug = 1;
1375 generic_make_request(bio);
1376 }
1377 }
1378 }
1379 spin_unlock_irqrestore(&conf->device_lock, flags);
1380 if (unplug)
1381 unplug_slaves(mddev);
1382}
1383
1384
1385static int init_resync(conf_t *conf)
1386{
1387 int buffs;
1388
1389 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1390 if (conf->r10buf_pool)
1391 BUG();
1392 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1393 if (!conf->r10buf_pool)
1394 return -ENOMEM;
1395 conf->next_resync = 0;
1396 return 0;
1397}
1398
1399/*
1400 * perform a "sync" on one "block"
1401 *
1402 * We need to make sure that no normal I/O request - particularly write
1403 * requests - conflict with active sync requests.
1404 *
1405 * This is achieved by tracking pending requests and a 'barrier' concept
1406 * that can be installed to exclude normal IO requests.
1407 *
1408 * Resync and recovery are handled very differently.
1409 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1410 *
1411 * For resync, we iterate over virtual addresses, read all copies,
1412 * and update if there are differences. If only one copy is live,
1413 * skip it.
1414 * For recovery, we iterate over physical addresses, read a good
1415 * value for each non-in_sync drive, and over-write.
1416 *
1417 * So, for recovery we may have several outstanding complex requests for a
1418 * given address, one for each out-of-sync device. We model this by allocating
1419 * a number of r10_bio structures, one for each out-of-sync device.
1420 * As we set up these structures, we collect all bios together into a list
1421 * which we then process collectively to add pages, and then process again
1422 * to pass to generic_make_request.
1423 *
1424 * The r10_bio structures are linked using a borrowed master_bio pointer.
1425 * This link is counted in ->remaining. When the r10_bio that points to NULL
1426 * has its remaining count decremented to 0, the whole complex operation
1427 * is complete.
1428 *
1429 */
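/*
 * For example (sketch of the chaining described above): recovering two
 * failed devices that map to the same virtual address allocates two
 * r10_bios; the second one's master_bio points at the first, whose own
 * master_bio is NULL, and each link is counted in ->remaining.
 * end_sync_write() walks this chain as the counts drop to zero, and only
 * the r10_bio with the NULL master_bio calls md_done_sync().
 */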
1430
NeilBrown57afd892005-06-21 17:17:13 -07001431static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432{
1433 conf_t *conf = mddev_to_conf(mddev);
1434 r10bio_t *r10_bio;
1435 struct bio *biolist = NULL, *bio;
1436 sector_t max_sector, nr_sectors;
1437 int disk;
1438 int i;
NeilBrown6cce3b22006-01-06 00:20:16 -08001439 int max_sync;
1440 int sync_blocks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
1442 sector_t sectors_skipped = 0;
1443 int chunks_skipped = 0;
1444
1445 if (!conf->r10buf_pool)
1446 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07001447 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
1449 skipped:
1450 max_sector = mddev->size << 1;
1451 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1452 max_sector = mddev->resync_max_sectors;
1453 if (sector_nr >= max_sector) {
NeilBrown6cce3b22006-01-06 00:20:16 -08001454 /* If we aborted, we need to abort the
1455 * sync on the 'current' bitmap chunks (there can
1456 * be several when recovering multiple devices),
1457 * as we may have started syncing them but not finished.
1458 * We can find the current address in
1459 * mddev->curr_resync, but for recovery,
1460 * we need to convert that to several
1461 * virtual addresses.
1462 */
1463 if (mddev->curr_resync < max_sector) { /* aborted */
1464 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1465 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1466 &sync_blocks, 1);
1467 else for (i=0; i<conf->raid_disks; i++) {
1468 sector_t sect =
1469 raid10_find_virt(conf, mddev->curr_resync, i);
1470 bitmap_end_sync(mddev->bitmap, sect,
1471 &sync_blocks, 1);
1472 }
1473 } else /* completed sync */
1474 conf->fullsync = 0;
1475
1476 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 close_sync(conf);
NeilBrown57afd892005-06-21 17:17:13 -07001478 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 return sectors_skipped;
1480 }
1481 if (chunks_skipped >= conf->raid_disks) {
1482 /* if there has been nothing to do on any drive,
1483 * then there is nothing to do at all..
1484 */
NeilBrown57afd892005-06-21 17:17:13 -07001485 *skipped = 1;
1486 return (max_sector - sector_nr) + sectors_skipped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 }
1488
1489 /* make sure whole request will fit in a chunk - if chunks
1490 * are meaningful
1491 */
1492 if (conf->near_copies < conf->raid_disks &&
1493 max_sector > (sector_nr | conf->chunk_mask))
1494 max_sector = (sector_nr | conf->chunk_mask) + 1;
1495 /*
1496 * If there is non-resync activity waiting for us then
1497 * put in a delay to throttle resync.
1498 */
NeilBrown0a27ec92006-01-06 00:20:13 -08001499 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 msleep_interruptible(1000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
1502 /* Again, very different code for resync and recovery.
1503 * Both must result in an r10bio with a list of bios that
1504 * have bi_end_io, bi_sector, bi_bdev set,
1505 * and bi_private set to the r10bio.
1506 * For recovery, we may actually create several r10bios
1507 * with 2 bios in each, that correspond to the bios in the main one.
1508 * In this case, the subordinate r10bios link back through a
1509 * borrowed master_bio pointer, and the counter in the master
1510 * includes a ref from each subordinate.
1511 */
1512 /* First, we decide what to do and set ->bi_end_io
1513 * To end_sync_read if we want to read, and
1514 * end_sync_write if we will want to write.
1515 */
1516
NeilBrown6cce3b22006-01-06 00:20:16 -08001517 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1519 /* recovery... the complicated one */
1520 int i, j, k;
1521 r10_bio = NULL;
1522
1523 for (i=0 ; i<conf->raid_disks; i++)
1524 if (conf->mirrors[i].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -08001525 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
NeilBrown6cce3b22006-01-06 00:20:16 -08001526 int still_degraded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 /* want to reconstruct this device */
1528 r10bio_t *rb2 = r10_bio;
NeilBrown6cce3b22006-01-06 00:20:16 -08001529 sector_t sect = raid10_find_virt(conf, sector_nr, i);
1530 int must_sync;
1531 /* Unless we are doing a full sync, we only need
1532 * to recover the block if it is set in the bitmap
1533 */
1534 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1535 &sync_blocks, 1);
1536 if (sync_blocks < max_sync)
1537 max_sync = sync_blocks;
1538 if (!must_sync &&
1539 !conf->fullsync) {
1540 /* yep, skip the sync_blocks here, but don't assume
1541 * that there will never be anything to do here
1542 */
1543 chunks_skipped = -1;
1544 continue;
1545 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546
1547 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
NeilBrown6cce3b22006-01-06 00:20:16 -08001548 raise_barrier(conf, rb2 != NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 atomic_set(&r10_bio->remaining, 0);
1550
1551 r10_bio->master_bio = (struct bio*)rb2;
1552 if (rb2)
1553 atomic_inc(&rb2->remaining);
1554 r10_bio->mddev = mddev;
1555 set_bit(R10BIO_IsRecover, &r10_bio->state);
NeilBrown6cce3b22006-01-06 00:20:16 -08001556 r10_bio->sector = sect;
1557
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 raid10_find_phys(conf, r10_bio);
NeilBrown6cce3b22006-01-06 00:20:16 -08001559 /* Need to check if this section will still be
1560 * degraded
1561 */
1562 for (j=0; j<conf->copies;j++) {
1563 int d = r10_bio->devs[j].devnum;
1564 if (conf->mirrors[d].rdev == NULL ||
1565 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1566 still_degraded = 1;
1567 }
1568 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1569 &sync_blocks, still_degraded);
1570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 for (j=0; j<conf->copies;j++) {
1572 int d = r10_bio->devs[j].devnum;
1573 if (conf->mirrors[d].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -08001574 test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 /* This is where we read from */
1576 bio = r10_bio->devs[0].bio;
1577 bio->bi_next = biolist;
1578 biolist = bio;
1579 bio->bi_private = r10_bio;
1580 bio->bi_end_io = end_sync_read;
1581 bio->bi_rw = 0;
1582 bio->bi_sector = r10_bio->devs[j].addr +
1583 conf->mirrors[d].rdev->data_offset;
1584 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1585 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1586 atomic_inc(&r10_bio->remaining);
1587 /* and we write to 'i' */
1588
1589 for (k=0; k<conf->copies; k++)
1590 if (r10_bio->devs[k].devnum == i)
1591 break;
1592 bio = r10_bio->devs[1].bio;
1593 bio->bi_next = biolist;
1594 biolist = bio;
1595 bio->bi_private = r10_bio;
1596 bio->bi_end_io = end_sync_write;
1597 bio->bi_rw = 1;
1598 bio->bi_sector = r10_bio->devs[k].addr +
1599 conf->mirrors[i].rdev->data_offset;
1600 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1601
1602 r10_bio->devs[0].devnum = d;
1603 r10_bio->devs[1].devnum = i;
1604
1605 break;
1606 }
1607 }
1608 if (j == conf->copies) {
NeilBrown87fc7672005-09-09 16:24:04 -07001609 /* Cannot recover, so abort the recovery */
1610 put_buf(r10_bio);
1611 r10_bio = rb2;
1612 if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
1613 printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1614 mdname(mddev));
1615 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 }
1617 }
1618 if (biolist == NULL) {
1619 while (r10_bio) {
1620 r10bio_t *rb2 = r10_bio;
1621 r10_bio = (r10bio_t*) rb2->master_bio;
1622 rb2->master_bio = NULL;
1623 put_buf(rb2);
1624 }
1625 goto giveup;
1626 }
1627 } else {
1628 /* resync. Schedule a read for every block at this virt offset */
1629 int count = 0;
NeilBrown6cce3b22006-01-06 00:20:16 -08001630
1631 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1632 &sync_blocks, mddev->degraded) &&
1633 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1634 /* We can skip this block */
1635 *skipped = 1;
1636 return sync_blocks + sectors_skipped;
1637 }
1638 if (sync_blocks < max_sync)
1639 max_sync = sync_blocks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 r10_bio->mddev = mddev;
1643 atomic_set(&r10_bio->remaining, 0);
NeilBrown6cce3b22006-01-06 00:20:16 -08001644 raise_barrier(conf, 0);
1645 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
1647 r10_bio->master_bio = NULL;
1648 r10_bio->sector = sector_nr;
1649 set_bit(R10BIO_IsSync, &r10_bio->state);
1650 raid10_find_phys(conf, r10_bio);
1651 r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
1652
		for (i=0; i<conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = 0;
			bio->bi_sector = r10_bio->devs[i].addr +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;
		}

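		/* Fewer than two readable copies means there is nothing to
		 * compare against, so drop the references taken above and
		 * skip this chunk.
		 */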
		if (count < 2) {
			for (i=0; i<conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}

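	/* The bios come from the r10buf pool and still carry state from
	 * their previous use, so reset them before pages are attached.
	 * Bios that will actually be issued start out marked up to date.
	 */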
	for (bio = biolist; bio ; bio=bio->bi_next) {

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		if (bio->bi_end_io)
			bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_hw_segments = 0;
		bio->bi_size = 0;
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
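	/* Attach pages to every bio in the list, one PAGE_SIZE piece at a
	 * time, until RESYNC_PAGES pages are queued or max_sector is hit.
	 * If any device refuses a page, trim that page from the bios that
	 * already took it and submit what we have so far.
	 */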
	do {
		struct page *page;
		int len = PAGE_SIZE;
		disk = 0;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio= biolist ; bio ; bio=bio->bi_next) {
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0) == 0) {
				/* stop here */
				struct bio *bio2;
				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
					/* remove last page from this bio */
					bio2->bi_vcnt--;
					bio2->bi_size -= len;
					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
				}
				goto bio_full;
			}
			disk = i;
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r10_bio->sectors = nr_sectors;

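	/* Only the end_sync_read bios are submitted here; writes (the
	 * recovery target or resync fix-ups) are issued later, once the
	 * reads have completed and raid10d processes the r10_bio.
	 */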
	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
	{
	sector_t sec = max_sector - sector_nr;
	sectors_skipped += sec;
	chunks_skipped ++;
	sector_nr = max_sector;
	goto skipped;
	}
}

static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, disk_idx;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int nc, fc;
	sector_t stride, size;

	if (mddev->level != 10) {
		printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
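	/* Decode and sanity-check the layout word: the total number of
	 * copies (nc*fc) must be at least 2, must not exceed the number
	 * of disks, and no unknown layout bits may be set.
	 */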
	nc = mddev->layout & 255;
	fc = (mddev->layout >> 8) & 255;
	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
	    (mddev->layout >> 16)) {
		printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->layout);
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	conf = kmalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out;
	}
	memset(conf, 0, sizeof(*conf));
	conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks);

	conf->near_copies = nc;
	conf->far_copies = fc;
	conf->copies = nc*fc;
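	/* chunk_mask/chunk_shift describe the chunk size in sectors.
	 * mddev->size is in KiB, so shifting by (chunk_shift - 1) converts
	 * it to chunks; dividing by far_copies and shifting back gives the
	 * length in sectors of each far-copy section of a device.
	 */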
	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
	stride = mddev->size >> (conf->chunk_shift-1);
	sector_div(stride, fc);
	conf->stride = stride << conf->chunk_shift;

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

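	/* Walk the rdevs the md core handed us, attach each to its slot in
	 * conf->mirrors, stack the queue limits of the underlying devices,
	 * and count the members that are currently in sync.
	 */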
	ITERATE_RDEV(mddev, rdev, tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			mddev->queue->max_sectors = (PAGE_SIZE>>9);

		disk->head_position = 0;
		if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
			conf->working_disks++;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	/* need to check that every block has at least one working mirror */
	if (!enough(conf)) {
		printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev) {
			disk->head_position = 0;
			mddev->degraded++;
		}
	}


	mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid10: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "raid10: raid set %s active with %d out of %d devices\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
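	/* Exported size: stride sectors per disk times raid_disks, divided
	 * by near_copies; array_size is kept in KiB, hence the divide by 2.
	 */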
	size = conf->stride * conf->raid_disks;
	sector_div(size, conf->near_copies);
	mddev->array_size = size/2;
	mddev->resync_max_sectors = size;

	mddev->queue->unplug_fn = raid10_unplug;
	mddev->queue->issue_flush_fn = raid10_issue_flush;

	/* Calculate max read-ahead size.
	 * We need to readahead at least twice a whole stripe....
	 * maybe...
	 */
	{
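		/* A stripe spans raid_disks chunks but only
		 * raid_disks/near_copies of them hold distinct data, so aim
		 * the read-ahead window at two full stripes' worth of pages.
		 */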
		int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
		stripe /= conf->near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	if (conf->near_copies < mddev->raid_disks)
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
	return 0;

out_free_conf:
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void raid10_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev_to_conf(mddev);

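	/* state 1 freezes the array by raising the resync barrier so no new
	 * normal I/O starts; state 0 drops the barrier again.  While a
	 * bitmap is present, raid10d wakes at the bitmap's daemon interval
	 * instead of sleeping indefinitely.
	 */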
	switch(state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
	if (mddev->thread) {
		if (mddev->bitmap)
			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
		else
			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		md_wakeup_thread(mddev->thread);
	}
}

static mdk_personality_t raid10_personality =
{
	.name		= "raid10",
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
};

static int __init raid_init(void)
{
	return register_md_personality(RAID10, &raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(RAID10);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-9"); /* RAID10 */