/*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 *
6 * RAID-5 management functions.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * You should have received a copy of the GNU General Public License
14 * (for example /usr/src/linux/COPYING); if not, write to the Free
15 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/raid/raid5.h>
23#include <linux/highmem.h>
24#include <linux/bitops.h>
25#include <asm/atomic.h>
26
#include <linux/raid/bitmap.h>
28
/*
30 * Stripe cache
31 */
32
33#define NR_STRIPES 256
34#define STRIPE_SIZE PAGE_SIZE
35#define STRIPE_SHIFT (PAGE_SHIFT - 9)
36#define STRIPE_SECTORS (STRIPE_SIZE>>9)
37#define IO_THRESHOLD 1
38#define HASH_PAGES 1
39#define HASH_PAGES_ORDER 0
40#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
41#define HASH_MASK (NR_HASH - 1)
42
43#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
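/*
 * Illustrative note (not part of the original source): with 4 KiB pages on
 * a 64-bit build, STRIPE_SHIFT is 3, STRIPE_SECTORS is 8 and NR_HASH is 512,
 * so a stripe starting at sector 0 hashes to bucket 0, sector 8 to bucket 1,
 * and two stripes share a bucket when their starting sectors differ by a
 * multiple of NR_HASH * STRIPE_SECTORS (4096 sectors).
 */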
44
45/* bio's attached to a stripe+device for I/O are linked together in bi_sector
46 * order without overlap. There may be several bio's per stripe+device, and
47 * a bio could span several devices.
48 * When walking this list for a particular stripe+device, we must never proceed
49 * beyond a bio that extends past this device, as the next bio might no longer
50 * be valid.
51 * This macro is used to determine the 'next' bio in the list, given the sector
52 * of the current stripe+device
53 */
54#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
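/*
 * Worked example for r5_next_bio (illustrative): for a stripe+device
 * covering sectors 16..23 (STRIPE_SECTORS == 8), a bio at bi_sector 16 with
 * bi_size 4096 ends exactly at sector 24, so r5_next_bio(bio, 16) returns
 * NULL; a 2-sector bio at sector 16 ends at 18 < 24, so its bi_next still
 * belongs to this stripe+device list and is returned.
 */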
55/*
56 * The following can be used to debug the driver
57 */
58#define RAID5_DEBUG 0
59#define RAID5_PARANOIA 1
60#if RAID5_PARANOIA && defined(CONFIG_SMP)
61# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
62#else
63# define CHECK_DEVLOCK()
64#endif
65
66#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
67#if RAID5_DEBUG
68#define inline
69#define __inline__
70#endif
71
72static void print_raid5_conf (raid5_conf_t *conf);
73
74static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
75{
76 if (atomic_dec_and_test(&sh->count)) {
77 if (!list_empty(&sh->lru))
78 BUG();
79 if (atomic_read(&conf->active_stripes)==0)
80 BUG();
81 if (test_bit(STRIPE_HANDLE, &sh->state)) {
82 if (test_bit(STRIPE_DELAYED, &sh->state))
83 list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
85 conf->seq_write == sh->bm_seq)
86 list_add_tail(&sh->lru, &conf->bitmap_list);
87 else {
88 clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
92 } else {
93 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
94 atomic_dec(&conf->preread_active_stripes);
95 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
96 md_wakeup_thread(conf->mddev->thread);
97 }
98 list_add_tail(&sh->lru, &conf->inactive_list);
99 atomic_dec(&conf->active_stripes);
100 if (!conf->inactive_blocked ||
101 atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
102 wake_up(&conf->wait_for_stripe);
103 }
104 }
105}
106static void release_stripe(struct stripe_head *sh)
107{
108 raid5_conf_t *conf = sh->raid_conf;
109 unsigned long flags;
110
111 spin_lock_irqsave(&conf->device_lock, flags);
112 __release_stripe(conf, sh);
113 spin_unlock_irqrestore(&conf->device_lock, flags);
114}
115
116static void remove_hash(struct stripe_head *sh)
117{
118 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
119
120 if (sh->hash_pprev) {
121 if (sh->hash_next)
122 sh->hash_next->hash_pprev = sh->hash_pprev;
123 *sh->hash_pprev = sh->hash_next;
124 sh->hash_pprev = NULL;
125 }
126}
127
128static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
129{
130 struct stripe_head **shp = &stripe_hash(conf, sh->sector);
131
132 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
133
134 CHECK_DEVLOCK();
135 if ((sh->hash_next = *shp) != NULL)
136 (*shp)->hash_pprev = &sh->hash_next;
137 *shp = sh;
138 sh->hash_pprev = shp;
139}
140
141
142/* find an idle stripe, make sure it is unhashed, and return it. */
143static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
144{
145 struct stripe_head *sh = NULL;
146 struct list_head *first;
147
148 CHECK_DEVLOCK();
149 if (list_empty(&conf->inactive_list))
150 goto out;
151 first = conf->inactive_list.next;
152 sh = list_entry(first, struct stripe_head, lru);
153 list_del_init(first);
154 remove_hash(sh);
155 atomic_inc(&conf->active_stripes);
156out:
157 return sh;
158}
159
160static void shrink_buffers(struct stripe_head *sh, int num)
161{
162 struct page *p;
163 int i;
164
165 for (i=0; i<num ; i++) {
166 p = sh->dev[i].page;
167 if (!p)
168 continue;
169 sh->dev[i].page = NULL;
170 page_cache_release(p);
171 }
172}
173
174static int grow_buffers(struct stripe_head *sh, int num)
175{
176 int i;
177
178 for (i=0; i<num; i++) {
179 struct page *page;
180
181 if (!(page = alloc_page(GFP_KERNEL))) {
182 return 1;
183 }
184 sh->dev[i].page = page;
185 }
186 return 0;
187}
188
189static void raid5_build_block (struct stripe_head *sh, int i);
190
191static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
192{
193 raid5_conf_t *conf = sh->raid_conf;
194 int disks = conf->raid_disks, i;
195
196 if (atomic_read(&sh->count) != 0)
197 BUG();
198 if (test_bit(STRIPE_HANDLE, &sh->state))
199 BUG();
200
201 CHECK_DEVLOCK();
202 PRINTK("init_stripe called, stripe %llu\n",
203 (unsigned long long)sh->sector);
204
205 remove_hash(sh);
206
207 sh->sector = sector;
208 sh->pd_idx = pd_idx;
209 sh->state = 0;
210
211 for (i=disks; i--; ) {
212 struct r5dev *dev = &sh->dev[i];
213
214 if (dev->toread || dev->towrite || dev->written ||
215 test_bit(R5_LOCKED, &dev->flags)) {
216 printk("sector=%llx i=%d %p %p %p %d\n",
217 (unsigned long long)sh->sector, i, dev->toread,
218 dev->towrite, dev->written,
219 test_bit(R5_LOCKED, &dev->flags));
220 BUG();
221 }
222 dev->flags = 0;
223 raid5_build_block(sh, i);
224 }
225 insert_hash(conf, sh);
226}
227
228static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
229{
230 struct stripe_head *sh;
231
232 CHECK_DEVLOCK();
233 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
234 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
235 if (sh->sector == sector)
236 return sh;
237 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
238 return NULL;
239}
240
241static void unplug_slaves(mddev_t *mddev);
242static void raid5_unplug_device(request_queue_t *q);
243
244static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
245 int pd_idx, int noblock)
246{
247 struct stripe_head *sh;
248
249 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
250
251 spin_lock_irq(&conf->device_lock);
252
253 do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector);
258 if (!sh) {
259 if (!conf->inactive_blocked)
260 sh = get_free_stripe(conf);
261 if (noblock && sh == NULL)
262 break;
263 if (!sh) {
264 conf->inactive_blocked = 1;
265 wait_event_lock_irq(conf->wait_for_stripe,
266 !list_empty(&conf->inactive_list) &&
267 (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
268 || !conf->inactive_blocked),
269 conf->device_lock,
270 unplug_slaves(conf->mddev);
271 );
272 conf->inactive_blocked = 0;
273 } else
274 init_stripe(sh, sector, pd_idx);
275 } else {
276 if (atomic_read(&sh->count)) {
277 if (!list_empty(&sh->lru))
278 BUG();
279 } else {
280 if (!test_bit(STRIPE_HANDLE, &sh->state))
281 atomic_inc(&conf->active_stripes);
282 if (list_empty(&sh->lru))
283 BUG();
284 list_del_init(&sh->lru);
285 }
286 }
287 } while (sh == NULL);
288
289 if (sh)
290 atomic_inc(&sh->count);
291
292 spin_unlock_irq(&conf->device_lock);
293 return sh;
294}
295
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
300 if (!sh)
301 return 0;
302 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
303 sh->raid_conf = conf;
304 spin_lock_init(&sh->lock);
305
306 if (grow_buffers(sh, conf->raid_disks)) {
307 shrink_buffers(sh, conf->raid_disks);
308 kmem_cache_free(conf->slab_cache, sh);
309 return 0;
310 }
311 /* we just created an active stripe so... */
312 atomic_set(&sh->count, 1);
313 atomic_inc(&conf->active_stripes);
314 INIT_LIST_HEAD(&sh->lru);
315 release_stripe(sh);
316 return 1;
317}
318
319static int grow_stripes(raid5_conf_t *conf, int num)
320{
	kmem_cache_t *sc;
322 int devs = conf->raid_disks;
323
324 sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
325
326 sc = kmem_cache_create(conf->cache_name,
327 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
328 0, 0, NULL, NULL);
329 if (!sc)
330 return 1;
331 conf->slab_cache = sc;
332 while (num--) {
		if (!grow_one_stripe(conf))
			return 1;
	}
336 return 0;
337}
338
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
344 sh = get_free_stripe(conf);
345 spin_unlock_irq(&conf->device_lock);
346 if (!sh)
347 return 0;
348 if (atomic_read(&sh->count))
349 BUG();
350 shrink_buffers(sh, conf->raid_disks);
351 kmem_cache_free(conf->slab_cache, sh);
352 atomic_dec(&conf->active_stripes);
353 return 1;
354}
355
356static void shrink_stripes(raid5_conf_t *conf)
357{
358 while (drop_one_stripe(conf))
359 ;
360
	kmem_cache_destroy(conf->slab_cache);
362 conf->slab_cache = NULL;
363}
364
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				  int error)
367{
368 struct stripe_head *sh = bi->bi_private;
369 raid5_conf_t *conf = sh->raid_conf;
370 int disks = conf->raid_disks, i;
371 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
372
373 if (bi->bi_size)
374 return 1;
375
376 for (i=0 ; i<disks; i++)
377 if (bi == &sh->dev[i].req)
378 break;
379
380 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
381 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
382 uptodate);
383 if (i == disks) {
384 BUG();
385 return 0;
386 }
387
388 if (uptodate) {
389#if 0
390 struct bio *bio;
391 unsigned long flags;
392 spin_lock_irqsave(&conf->device_lock, flags);
393 /* we can return a buffer if we bypassed the cache or
394 * if the top buffer is not in highmem. If there are
395 * multiple buffers, leave the extra work to
396 * handle_stripe
397 */
398 buffer = sh->bh_read[i];
399 if (buffer &&
400 (!PageHighMem(buffer->b_page)
401 || buffer->b_page == bh->b_page )
402 ) {
403 sh->bh_read[i] = buffer->b_reqnext;
404 buffer->b_reqnext = NULL;
405 } else
406 buffer = NULL;
407 spin_unlock_irqrestore(&conf->device_lock, flags);
408 if (sh->bh_page[i]==bh->b_page)
409 set_buffer_uptodate(bh);
410 if (buffer) {
411 if (buffer->b_page != bh->b_page)
412 memcpy(buffer->b_data, bh->b_data, bh->b_size);
413 buffer->b_end_io(buffer, 1);
414 }
415#else
416 set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
418 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
419 printk("R5: read error corrected!!\n");
420 clear_bit(R5_ReadError, &sh->dev[i].flags);
421 clear_bit(R5_ReWrite, &sh->dev[i].flags);
422 }
	} else {
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (conf->mddev->degraded) {
426 printk("R5: read error not correctable.\n");
427 clear_bit(R5_ReadError, &sh->dev[i].flags);
428 clear_bit(R5_ReWrite, &sh->dev[i].flags);
429 md_error(conf->mddev, conf->disks[i].rdev);
430 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
431 /* Oh, no!!! */
432 printk("R5: read error NOT corrected!!\n");
433 clear_bit(R5_ReadError, &sh->dev[i].flags);
434 clear_bit(R5_ReWrite, &sh->dev[i].flags);
435 md_error(conf->mddev, conf->disks[i].rdev);
436 } else
437 set_bit(R5_ReadError, &sh->dev[i].flags);
	}
439 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
440#if 0
441 /* must restore b_page before unlocking buffer... */
442 if (sh->bh_page[i] != bh->b_page) {
443 bh->b_page = sh->bh_page[i];
444 bh->b_data = page_address(bh->b_page);
445 clear_buffer_uptodate(bh);
446 }
447#endif
448 clear_bit(R5_LOCKED, &sh->dev[i].flags);
449 set_bit(STRIPE_HANDLE, &sh->state);
450 release_stripe(sh);
451 return 0;
452}
453
454static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
455 int error)
456{
457 struct stripe_head *sh = bi->bi_private;
458 raid5_conf_t *conf = sh->raid_conf;
459 int disks = conf->raid_disks, i;
460 unsigned long flags;
461 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
462
463 if (bi->bi_size)
464 return 1;
465
466 for (i=0 ; i<disks; i++)
467 if (bi == &sh->dev[i].req)
468 break;
469
470 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
471 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
472 uptodate);
473 if (i == disks) {
474 BUG();
475 return 0;
476 }
477
478 spin_lock_irqsave(&conf->device_lock, flags);
479 if (!uptodate)
480 md_error(conf->mddev, conf->disks[i].rdev);
481
482 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
483
484 clear_bit(R5_LOCKED, &sh->dev[i].flags);
485 set_bit(STRIPE_HANDLE, &sh->state);
486 __release_stripe(conf, sh);
487 spin_unlock_irqrestore(&conf->device_lock, flags);
488 return 0;
489}
490
491
492static sector_t compute_blocknr(struct stripe_head *sh, int i);
493
494static void raid5_build_block (struct stripe_head *sh, int i)
495{
496 struct r5dev *dev = &sh->dev[i];
497
498 bio_init(&dev->req);
499 dev->req.bi_io_vec = &dev->vec;
500 dev->req.bi_vcnt++;
501 dev->req.bi_max_vecs++;
502 dev->vec.bv_page = dev->page;
503 dev->vec.bv_len = STRIPE_SIZE;
504 dev->vec.bv_offset = 0;
505
506 dev->req.bi_sector = sh->sector;
507 dev->req.bi_private = sh;
508
509 dev->flags = 0;
510 if (i != sh->pd_idx)
511 dev->sector = compute_blocknr(sh, i);
512}
513
514static void error(mddev_t *mddev, mdk_rdev_t *rdev)
515{
516 char b[BDEVNAME_SIZE];
517 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
518 PRINTK("raid5: error called\n");
519
520 if (!rdev->faulty) {
521 mddev->sb_dirty = 1;
522 if (rdev->in_sync) {
523 conf->working_disks--;
524 mddev->degraded++;
525 conf->failed_disks++;
526 rdev->in_sync = 0;
527 /*
528 * if recovery was running, make sure it aborts.
529 */
530 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
531 }
532 rdev->faulty = 1;
533 printk (KERN_ALERT
534 "raid5: Disk failure on %s, disabling device."
535 " Operation continuing on %d devices\n",
536 bdevname(rdev->bdev,b), conf->working_disks);
537 }
538}
539
540/*
541 * Input: a 'big' sector number,
542 * Output: index of the data and parity disk, and the sector # in them.
543 */
544static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
545 unsigned int data_disks, unsigned int * dd_idx,
546 unsigned int * pd_idx, raid5_conf_t *conf)
547{
548 long stripe;
549 unsigned long chunk_number;
550 unsigned int chunk_offset;
551 sector_t new_sector;
552 int sectors_per_chunk = conf->chunk_size >> 9;
553
554 /* First compute the information on this sector */
555
556 /*
557 * Compute the chunk number and the sector offset inside the chunk
558 */
559 chunk_offset = sector_div(r_sector, sectors_per_chunk);
560 chunk_number = r_sector;
561 BUG_ON(r_sector != chunk_number);
562
563 /*
564 * Compute the stripe number
565 */
566 stripe = chunk_number / data_disks;
567
568 /*
569 * Compute the data disk and parity disk indexes inside the stripe
570 */
571 *dd_idx = chunk_number % data_disks;
572
573 /*
574 * Select the parity disk based on the user selected algorithm.
575 */
576 if (conf->level == 4)
577 *pd_idx = data_disks;
578 else switch (conf->algorithm) {
579 case ALGORITHM_LEFT_ASYMMETRIC:
580 *pd_idx = data_disks - stripe % raid_disks;
581 if (*dd_idx >= *pd_idx)
582 (*dd_idx)++;
583 break;
584 case ALGORITHM_RIGHT_ASYMMETRIC:
585 *pd_idx = stripe % raid_disks;
586 if (*dd_idx >= *pd_idx)
587 (*dd_idx)++;
588 break;
589 case ALGORITHM_LEFT_SYMMETRIC:
590 *pd_idx = data_disks - stripe % raid_disks;
591 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
592 break;
593 case ALGORITHM_RIGHT_SYMMETRIC:
594 *pd_idx = stripe % raid_disks;
595 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
596 break;
597 default:
598 printk("raid5: unsupported algorithm %d\n",
599 conf->algorithm);
600 }
601
602 /*
603 * Finally, compute the new sector number
604 */
605 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
606 return new_sector;
607}
608
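/*
 * Worked example (illustrative, not from the original source): a 4-disk
 * RAID5 (data_disks == 3), 64 KiB chunks (sectors_per_chunk == 128) and
 * ALGORITHM_LEFT_SYMMETRIC.  For r_sector == 300: chunk_offset == 44,
 * chunk_number == 2, stripe == 0, raw dd_idx == 2; pd_idx == 3 - 0 % 4 == 3
 * and dd_idx == (3 + 1 + 2) % 4 == 2, so the block lives at sector
 * 0 * 128 + 44 == 44 of data disk 2, with parity on disk 3.
 */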
609
610static sector_t compute_blocknr(struct stripe_head *sh, int i)
611{
612 raid5_conf_t *conf = sh->raid_conf;
613 int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
614 sector_t new_sector = sh->sector, check;
615 int sectors_per_chunk = conf->chunk_size >> 9;
616 sector_t stripe;
617 int chunk_offset;
618 int chunk_number, dummy1, dummy2, dd_idx = i;
619 sector_t r_sector;
620
621 chunk_offset = sector_div(new_sector, sectors_per_chunk);
622 stripe = new_sector;
623 BUG_ON(new_sector != stripe);
624
625
626 switch (conf->algorithm) {
627 case ALGORITHM_LEFT_ASYMMETRIC:
628 case ALGORITHM_RIGHT_ASYMMETRIC:
629 if (i > sh->pd_idx)
630 i--;
631 break;
632 case ALGORITHM_LEFT_SYMMETRIC:
633 case ALGORITHM_RIGHT_SYMMETRIC:
634 if (i < sh->pd_idx)
635 i += raid_disks;
636 i -= (sh->pd_idx + 1);
637 break;
638 default:
639 printk("raid5: unsupported algorithm %d\n",
640 conf->algorithm);
641 }
642
643 chunk_number = stripe * data_disks + i;
644 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
645
646 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
647 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
648 printk("compute_blocknr: map not correct\n");
649 return 0;
650 }
651 return r_sector;
652}
653
654
655
656/*
657 * Copy data between a page in the stripe cache, and a bio.
658 * There are no alignment or size guarantees between the page or the
659 * bio except that there is some overlap.
660 * All iovecs in the bio must be considered.
661 */
662static void copy_data(int frombio, struct bio *bio,
663 struct page *page,
664 sector_t sector)
665{
666 char *pa = page_address(page);
667 struct bio_vec *bvl;
668 int i;
669 int page_offset;
670
671 if (bio->bi_sector >= sector)
672 page_offset = (signed)(bio->bi_sector - sector) * 512;
673 else
674 page_offset = (signed)(sector - bio->bi_sector) * -512;
675 bio_for_each_segment(bvl, bio, i) {
676 int len = bio_iovec_idx(bio,i)->bv_len;
677 int clen;
678 int b_offset = 0;
679
680 if (page_offset < 0) {
681 b_offset = -page_offset;
682 page_offset += b_offset;
683 len -= b_offset;
684 }
685
686 if (len > 0 && page_offset + len > STRIPE_SIZE)
687 clen = STRIPE_SIZE - page_offset;
688 else clen = len;
689
690 if (clen > 0) {
691 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
692 if (frombio)
693 memcpy(pa+page_offset, ba+b_offset, clen);
694 else
695 memcpy(ba+b_offset, pa+page_offset, clen);
696 __bio_kunmap_atomic(ba, KM_USER0);
697 }
698 if (clen < len) /* hit end of page */
699 break;
700 page_offset += len;
701 }
702}
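/*
 * Illustrative example: if the stripe page covers sectors 64..71 and a bio
 * starts at sector 62, page_offset starts at (64 - 62) * -512 == -1024, so
 * the first 1024 bytes of the bio segment are skipped (b_offset) and the
 * copy begins at offset 0 of the stripe page.
 */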
703
704#define check_xor() do { \
705 if (count == MAX_XOR_BLOCKS) { \
706 xor_block(count, STRIPE_SIZE, ptr); \
707 count = 1; \
708 } \
709 } while(0)
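/*
 * check_xor() batches XOR sources: once MAX_XOR_BLOCKS pointers have been
 * collected, xor_block() folds them into ptr[0] and count resets to 1 (the
 * destination page), so any number of disks can be combined a few pages at
 * a time.
 */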
710
711
712static void compute_block(struct stripe_head *sh, int dd_idx)
713{
714 raid5_conf_t *conf = sh->raid_conf;
715 int i, count, disks = conf->raid_disks;
716 void *ptr[MAX_XOR_BLOCKS], *p;
717
718 PRINTK("compute_block, stripe %llu, idx %d\n",
719 (unsigned long long)sh->sector, dd_idx);
720
721 ptr[0] = page_address(sh->dev[dd_idx].page);
722 memset(ptr[0], 0, STRIPE_SIZE);
723 count = 1;
724 for (i = disks ; i--; ) {
725 if (i == dd_idx)
726 continue;
727 p = page_address(sh->dev[i].page);
728 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
729 ptr[count++] = p;
730 else
731 printk("compute_block() %d, stripe %llu, %d"
732 " not present\n", dd_idx,
733 (unsigned long long)sh->sector, i);
734
735 check_xor();
736 }
737 if (count != 1)
738 xor_block(count, STRIPE_SIZE, ptr);
739 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
740}
741
742static void compute_parity(struct stripe_head *sh, int method)
743{
744 raid5_conf_t *conf = sh->raid_conf;
745 int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
746 void *ptr[MAX_XOR_BLOCKS];
747 struct bio *chosen;
748
749 PRINTK("compute_parity, stripe %llu, method %d\n",
750 (unsigned long long)sh->sector, method);
751
752 count = 1;
753 ptr[0] = page_address(sh->dev[pd_idx].page);
754 switch(method) {
755 case READ_MODIFY_WRITE:
756 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
757 BUG();
758 for (i=disks ; i-- ;) {
759 if (i==pd_idx)
760 continue;
761 if (sh->dev[i].towrite &&
762 test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
763 ptr[count++] = page_address(sh->dev[i].page);
764 chosen = sh->dev[i].towrite;
765 sh->dev[i].towrite = NULL;
766
767 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
768 wake_up(&conf->wait_for_overlap);
769
770 if (sh->dev[i].written) BUG();
771 sh->dev[i].written = chosen;
772 check_xor();
773 }
774 }
775 break;
776 case RECONSTRUCT_WRITE:
777 memset(ptr[0], 0, STRIPE_SIZE);
778 for (i= disks; i-- ;)
779 if (i!=pd_idx && sh->dev[i].towrite) {
780 chosen = sh->dev[i].towrite;
781 sh->dev[i].towrite = NULL;
782
783 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
784 wake_up(&conf->wait_for_overlap);
785
786 if (sh->dev[i].written) BUG();
787 sh->dev[i].written = chosen;
788 }
789 break;
790 case CHECK_PARITY:
791 break;
792 }
793 if (count>1) {
794 xor_block(count, STRIPE_SIZE, ptr);
795 count = 1;
796 }
797
798 for (i = disks; i--;)
799 if (sh->dev[i].written) {
800 sector_t sector = sh->dev[i].sector;
801 struct bio *wbi = sh->dev[i].written;
802 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
803 copy_data(1, wbi, sh->dev[i].page, sector);
804 wbi = r5_next_bio(wbi, sector);
805 }
806
807 set_bit(R5_LOCKED, &sh->dev[i].flags);
808 set_bit(R5_UPTODATE, &sh->dev[i].flags);
809 }
810
811 switch(method) {
812 case RECONSTRUCT_WRITE:
813 case CHECK_PARITY:
814 for (i=disks; i--;)
815 if (i != pd_idx) {
816 ptr[count++] = page_address(sh->dev[i].page);
817 check_xor();
818 }
819 break;
820 case READ_MODIFY_WRITE:
821 for (i = disks; i--;)
822 if (sh->dev[i].written) {
823 ptr[count++] = page_address(sh->dev[i].page);
824 check_xor();
825 }
826 }
827 if (count != 1)
828 xor_block(count, STRIPE_SIZE, ptr);
829
830 if (method != CHECK_PARITY) {
831 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
832 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
833 } else
834 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
835}
836
/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
842static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
843{
844 struct bio **bip;
845 raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);
851
852
853 spin_lock(&sh->lock);
854 spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
861 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
862 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
863 goto overlap;
864 bip = & (*bip)->bi_next;
865 }
866 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
867 goto overlap;
868
869 if (*bip && bi->bi_next && (*bip) != bi->bi_next)
870 BUG();
871 if (*bip)
872 bi->bi_next = *bip;
873 *bip = bi;
874 bi->bi_phys_segments ++;
875 spin_unlock_irq(&conf->device_lock);
876 spin_unlock(&sh->lock);
877
878 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
879 (unsigned long long)bi->bi_sector,
880 (unsigned long long)sh->sector, dd_idx);
881
	if (conf->mddev->bitmap && firstwrite) {
883 sh->bm_seq = conf->seq_write;
884 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
885 STRIPE_SECTORS, 0);
886 set_bit(STRIPE_BIT_DELAY, &sh->state);
887 }
888
	if (forwrite) {
890 /* check if page is covered */
891 sector_t sector = sh->dev[dd_idx].sector;
892 for (bi=sh->dev[dd_idx].towrite;
893 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
894 bi && bi->bi_sector <= sector;
895 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
896 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
897 sector = bi->bi_sector + (bi->bi_size>>9);
898 }
899 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
900 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
901 }
902 return 1;
903
904 overlap:
905 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
906 spin_unlock_irq(&conf->device_lock);
907 spin_unlock(&sh->lock);
908 return 0;
909}
910
911
912/*
913 * handle_stripe - do things to a stripe.
914 *
915 * We lock the stripe and then examine the state of various bits
916 * to see what needs to be done.
917 * Possible results:
918 * return some read request which now have data
919 * return some write requests which are safely on disc
920 * schedule a read on some buffers
921 * schedule a write of some buffers
922 * return confirmation of parity correctness
923 *
924 * Parity calculations are done inside the stripe lock
925 * buffers are taken off read_list or write_list, and bh_cache buffers
926 * get BH_Lock set before the stripe lock is released.
927 *
928 */
929
930static void handle_stripe(struct stripe_head *sh)
931{
932 raid5_conf_t *conf = sh->raid_conf;
933 int disks = conf->raid_disks;
934 struct bio *return_bi= NULL;
935 struct bio *bi;
936 int i;
937 int syncing;
938 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
939 int non_overwrite = 0;
940 int failed_num=0;
941 struct r5dev *dev;
942
943 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
944 (unsigned long long)sh->sector, atomic_read(&sh->count),
945 sh->pd_idx);
946
947 spin_lock(&sh->lock);
948 clear_bit(STRIPE_HANDLE, &sh->state);
949 clear_bit(STRIPE_DELAYED, &sh->state);
950
951 syncing = test_bit(STRIPE_SYNCING, &sh->state);
952 /* Now to look around and see what can be done */
953
954 for (i=disks; i--; ) {
955 mdk_rdev_t *rdev;
956 dev = &sh->dev[i];
957 clear_bit(R5_Insync, &dev->flags);
958 clear_bit(R5_Syncio, &dev->flags);
959
960 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
961 i, dev->flags, dev->toread, dev->towrite, dev->written);
962 /* maybe we can reply to a read */
963 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
964 struct bio *rbi, *rbi2;
965 PRINTK("Return read for disc %d\n", i);
966 spin_lock_irq(&conf->device_lock);
967 rbi = dev->toread;
968 dev->toread = NULL;
969 if (test_and_clear_bit(R5_Overlap, &dev->flags))
970 wake_up(&conf->wait_for_overlap);
971 spin_unlock_irq(&conf->device_lock);
972 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
973 copy_data(0, rbi, dev->page, dev->sector);
974 rbi2 = r5_next_bio(rbi, dev->sector);
975 spin_lock_irq(&conf->device_lock);
976 if (--rbi->bi_phys_segments == 0) {
977 rbi->bi_next = return_bi;
978 return_bi = rbi;
979 }
980 spin_unlock_irq(&conf->device_lock);
981 rbi = rbi2;
982 }
983 }
984
985 /* now count some things */
986 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
987 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
988
989
990 if (dev->toread) to_read++;
991 if (dev->towrite) {
992 to_write++;
993 if (!test_bit(R5_OVERWRITE, &dev->flags))
994 non_overwrite++;
995 }
996 if (dev->written) written++;
997 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
		if (!rdev || !rdev->in_sync) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !rdev->in_sync
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
1006 failed_num = i;
1007 } else
1008 set_bit(R5_Insync, &dev->flags);
1009 }
1010 PRINTK("locked=%d uptodate=%d to_read=%d"
1011 " to_write=%d failed=%d failed_num=%d\n",
1012 locked, uptodate, to_read, to_write, failed, failed_num);
1013 /* check if the array has lost two devices and, if so, some requests might
1014 * need to be failed
1015 */
1016 if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1021 mdk_rdev_t *rdev = conf->disks[i].rdev;
1022 if (rdev && rdev->in_sync)
1023 /* multiple read failures in one stripe */
1024 md_error(conf->mddev, rdev);
1025 }
1026
			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1034 wake_up(&conf->wait_for_overlap);
1035
1036 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1037 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1038 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1039 if (--bi->bi_phys_segments == 0) {
1040 md_write_end(conf->mddev);
1041 bi->bi_next = return_bi;
1042 return_bi = bi;
1043 }
1044 bi = nextbi;
1045 }
1046 /* and fail all 'written' */
1047 bi = sh->dev[i].written;
1048 sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1051 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1052 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1053 if (--bi->bi_phys_segments == 0) {
1054 md_write_end(conf->mddev);
1055 bi->bi_next = return_bi;
1056 return_bi = bi;
1057 }
1058 bi = bi2;
1059 }
1060
1061 /* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
1065 sh->dev[i].toread = NULL;
1066 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1067 wake_up(&conf->wait_for_overlap);
1068 if (bi) to_read--;
1069 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1070 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1071 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1072 if (--bi->bi_phys_segments == 0) {
1073 bi->bi_next = return_bi;
1074 return_bi = bi;
1075 }
1076 bi = nextbi;
1077 }
1078 }
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
1085 if (failed > 1 && syncing) {
1086 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1087 clear_bit(STRIPE_SYNCING, &sh->state);
1088 syncing = 0;
1089 }
1090
1091 /* might be able to return some write requests if the parity block
1092 * is safe, or on a failed drive
1093 */
1094 dev = &sh->dev[sh->pd_idx];
1095 if ( written &&
1096 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1097 test_bit(R5_UPTODATE, &dev->flags))
1098 || (failed == 1 && failed_num == sh->pd_idx))
1099 ) {
1100 /* any written block on an uptodate or failed drive can be returned.
1101 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1102 * never LOCKED, so we don't need to test 'failed' directly.
1103 */
1104 for (i=disks; i--; )
1105 if (sh->dev[i].written) {
1106 dev = &sh->dev[i];
1107 if (!test_bit(R5_LOCKED, &dev->flags) &&
1108 test_bit(R5_UPTODATE, &dev->flags) ) {
1109 /* We can return any write requests */
1110 struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				PRINTK("Return write for disc %d\n", i);
1113 spin_lock_irq(&conf->device_lock);
1114 wbi = dev->written;
1115 dev->written = NULL;
1116 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1117 wbi2 = r5_next_bio(wbi, dev->sector);
1118 if (--wbi->bi_phys_segments == 0) {
1119 md_write_end(conf->mddev);
1120 wbi->bi_next = return_bi;
1121 return_bi = wbi;
1122 }
1123 wbi = wbi2;
1124 }
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap, sh->sector,
							STRIPE_SECTORS,
							!test_bit(STRIPE_DEGRADED, &sh->state), 0);
			}
1133 }
1134 }
1135
1136 /* Now we might consider reading some blocks, either to check/generate
1137 * parity, or to satisfy requests
1138 * or to load a block that is being partially written.
1139 */
1140 if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
1141 for (i=disks; i--;) {
1142 dev = &sh->dev[i];
1143 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1144 (dev->toread ||
1145 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1146 syncing ||
1147 (failed && (sh->dev[failed_num].toread ||
1148 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1149 )
1150 ) {
1151 /* we would like to get this block, possibly
1152 * by computing it, but we might not be able to
1153 */
1154 if (uptodate == disks-1) {
1155 PRINTK("Computing block %d\n", i);
1156 compute_block(sh, i);
1157 uptodate++;
1158 } else if (test_bit(R5_Insync, &dev->flags)) {
1159 set_bit(R5_LOCKED, &dev->flags);
1160 set_bit(R5_Wantread, &dev->flags);
1161#if 0
1162 /* if I am just reading this block and we don't have
1163 a failed drive, or any pending writes then sidestep the cache */
1164 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1165 ! syncing && !failed && !to_write) {
1166 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1167 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1168 }
1169#endif
1170 locked++;
1171 PRINTK("Reading block %d (sync=%d)\n",
1172 i, syncing);
1173 if (syncing)
1174 md_sync_acct(conf->disks[i].rdev->bdev,
1175 STRIPE_SECTORS);
1176 }
1177 }
1178 }
1179 set_bit(STRIPE_HANDLE, &sh->state);
1180 }
1181
1182 /* now to consider writing and what else, if anything should be read */
1183 if (to_write) {
1184 int rmw=0, rcw=0;
1185 for (i=disks ; i--;) {
1186 /* would I have to read this buffer for read_modify_write */
1187 dev = &sh->dev[i];
1188 if ((dev->towrite || i == sh->pd_idx) &&
1189 (!test_bit(R5_LOCKED, &dev->flags)
1190#if 0
1191|| sh->bh_page[i]!=bh->b_page
1192#endif
1193 ) &&
1194 !test_bit(R5_UPTODATE, &dev->flags)) {
1195 if (test_bit(R5_Insync, &dev->flags)
1196/* && !(!mddev->insync && i == sh->pd_idx) */
1197 )
1198 rmw++;
1199 else rmw += 2*disks; /* cannot read it */
1200 }
1201 /* Would I have to read this buffer for reconstruct_write */
1202 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1203 (!test_bit(R5_LOCKED, &dev->flags)
1204#if 0
1205|| sh->bh_page[i] != bh->b_page
1206#endif
1207 ) &&
1208 !test_bit(R5_UPTODATE, &dev->flags)) {
1209 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1210 else rcw += 2*disks;
1211 }
1212 }
1213 PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1214 (unsigned long long)sh->sector, rmw, rcw);
1215 set_bit(STRIPE_HANDLE, &sh->state);
1216 if (rmw < rcw && rmw > 0)
1217 /* prefer read-modify-write, but need to get some data */
1218 for (i=disks; i--;) {
1219 dev = &sh->dev[i];
1220 if ((dev->towrite || i == sh->pd_idx) &&
1221 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1222 test_bit(R5_Insync, &dev->flags)) {
1223 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1224 {
1225 PRINTK("Read_old block %d for r-m-w\n", i);
1226 set_bit(R5_LOCKED, &dev->flags);
1227 set_bit(R5_Wantread, &dev->flags);
1228 locked++;
1229 } else {
1230 set_bit(STRIPE_DELAYED, &sh->state);
1231 set_bit(STRIPE_HANDLE, &sh->state);
1232 }
1233 }
1234 }
1235 if (rcw <= rmw && rcw > 0)
1236 /* want reconstruct write, but need to get some data */
1237 for (i=disks; i--;) {
1238 dev = &sh->dev[i];
1239 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1240 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1241 test_bit(R5_Insync, &dev->flags)) {
1242 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1243 {
1244 PRINTK("Read_old block %d for Reconstruct\n", i);
1245 set_bit(R5_LOCKED, &dev->flags);
1246 set_bit(R5_Wantread, &dev->flags);
1247 locked++;
1248 } else {
1249 set_bit(STRIPE_DELAYED, &sh->state);
1250 set_bit(STRIPE_HANDLE, &sh->state);
1251 }
1252 }
1253 }
1254 /* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			PRINTK("Computing parity...\n");
1258 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1259 /* now every locked buffer is ready to be written */
1260 for (i=disks; i--;)
1261 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1262 PRINTK("Writing block %d\n", i);
1263 locked++;
1264 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1265 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1266 || (i==sh->pd_idx && failed == 0))
1267 set_bit(STRIPE_INSYNC, &sh->state);
1268 }
1269 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1270 atomic_dec(&conf->preread_active_stripes);
1271 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1272 md_wakeup_thread(conf->mddev->thread);
1273 }
1274 }
1275 }
1276
1277 /* maybe we need to check and possibly fix the parity for this stripe
1278 * Any reads will already have been scheduled, so we just see if enough data
1279 * is available
1280 */
1281 if (syncing && locked == 0 &&
1282 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
1283 set_bit(STRIPE_HANDLE, &sh->state);
1284 if (failed == 0) {
1285 char *pagea;
1286 if (uptodate != disks)
1287 BUG();
1288 compute_parity(sh, CHECK_PARITY);
1289 uptodate--;
1290 pagea = page_address(sh->dev[sh->pd_idx].page);
1291 if ((*(u32*)pagea) == 0 &&
1292 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1293 /* parity is correct (on disc, not in buffer any more) */
1294 set_bit(STRIPE_INSYNC, &sh->state);
1295 }
1296 }
1297 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1298 if (failed==0)
1299 failed_num = sh->pd_idx;
1300 /* should be able to compute the missing block and write it to spare */
1301 if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
1302 if (uptodate+1 != disks)
1303 BUG();
1304 compute_block(sh, failed_num);
1305 uptodate++;
1306 }
1307 if (uptodate != disks)
1308 BUG();
1309 dev = &sh->dev[failed_num];
1310 set_bit(R5_LOCKED, &dev->flags);
1311 set_bit(R5_Wantwrite, &dev->flags);
			clear_bit(STRIPE_DEGRADED, &sh->state);
			locked++;
1314 set_bit(STRIPE_INSYNC, &sh->state);
1315 set_bit(R5_Syncio, &dev->flags);
1316 }
1317 }
1318 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1319 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1320 clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
1326 if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1327 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1328 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1329 ) {
1330 dev = &sh->dev[failed_num];
1331 if (!test_bit(R5_ReWrite, &dev->flags)) {
1332 set_bit(R5_Wantwrite, &dev->flags);
1333 set_bit(R5_ReWrite, &dev->flags);
1334 set_bit(R5_LOCKED, &dev->flags);
1335 } else {
1336 /* let's read it back */
1337 set_bit(R5_Wantread, &dev->flags);
1338 set_bit(R5_LOCKED, &dev->flags);
1339 }
1340 }
1341
	spin_unlock(&sh->lock);
1343
1344 while ((bi=return_bi)) {
1345 int bytes = bi->bi_size;
1346
1347 return_bi = bi->bi_next;
1348 bi->bi_next = NULL;
1349 bi->bi_size = 0;
1350 bi->bi_end_io(bi, bytes, 0);
1351 }
1352 for (i=disks; i-- ;) {
1353 int rw;
1354 struct bio *bi;
1355 mdk_rdev_t *rdev;
1356 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1357 rw = 1;
1358 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1359 rw = 0;
1360 else
1361 continue;
1362
1363 bi = &sh->dev[i].req;
1364
1365 bi->bi_rw = rw;
1366 if (rw)
1367 bi->bi_end_io = raid5_end_write_request;
1368 else
1369 bi->bi_end_io = raid5_end_read_request;
1370
1371 rcu_read_lock();
1372 rdev = conf->disks[i].rdev;
1373 if (rdev && rdev->faulty)
1374 rdev = NULL;
1375 if (rdev)
1376 atomic_inc(&rdev->nr_pending);
1377 rcu_read_unlock();
1378
1379 if (rdev) {
1380 if (test_bit(R5_Syncio, &sh->dev[i].flags))
1381 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1382
1383 bi->bi_bdev = rdev->bdev;
1384 PRINTK("for %llu schedule op %ld on disc %d\n",
1385 (unsigned long long)sh->sector, bi->bi_rw, i);
1386 atomic_inc(&sh->count);
1387 bi->bi_sector = sh->sector + rdev->data_offset;
1388 bi->bi_flags = 1 << BIO_UPTODATE;
1389 bi->bi_vcnt = 1;
1390 bi->bi_max_vecs = 1;
1391 bi->bi_idx = 0;
1392 bi->bi_io_vec = &sh->dev[i].vec;
1393 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1394 bi->bi_io_vec[0].bv_offset = 0;
1395 bi->bi_size = STRIPE_SIZE;
1396 bi->bi_next = NULL;
1397 generic_make_request(bi);
1398 } else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
1402 bi->bi_rw, i, (unsigned long long)sh->sector);
1403 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1404 set_bit(STRIPE_HANDLE, &sh->state);
1405 }
1406 }
1407}
1408
1409static inline void raid5_activate_delayed(raid5_conf_t *conf)
1410{
1411 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1412 while (!list_empty(&conf->delayed_list)) {
1413 struct list_head *l = conf->delayed_list.next;
1414 struct stripe_head *sh;
1415 sh = list_entry(l, struct stripe_head, lru);
1416 list_del_init(l);
1417 clear_bit(STRIPE_DELAYED, &sh->state);
1418 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1419 atomic_inc(&conf->preread_active_stripes);
1420 list_add_tail(&sh->lru, &conf->handle_list);
1421 }
1422 }
1423}
1424
static inline void activate_bit_delay(raid5_conf_t *conf)
1426{
1427 /* device_lock is held */
1428 struct list_head head;
1429 list_add(&head, &conf->bitmap_list);
1430 list_del_init(&conf->bitmap_list);
1431 while (!list_empty(&head)) {
1432 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1433 list_del_init(&sh->lru);
1434 atomic_inc(&sh->count);
1435 __release_stripe(conf, sh);
1436 }
1437}
1438
static void unplug_slaves(mddev_t *mddev)
1440{
1441 raid5_conf_t *conf = mddev_to_conf(mddev);
1442 int i;
1443
1444 rcu_read_lock();
1445 for (i=0; i<mddev->raid_disks; i++) {
1446 mdk_rdev_t *rdev = conf->disks[i].rdev;
1447 if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
1448 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1449
1450 atomic_inc(&rdev->nr_pending);
1451 rcu_read_unlock();
1452
1453 if (r_queue->unplug_fn)
1454 r_queue->unplug_fn(r_queue);
1455
1456 rdev_dec_pending(rdev, mddev);
1457 rcu_read_lock();
1458 }
1459 }
1460 rcu_read_unlock();
1461}
1462
1463static void raid5_unplug_device(request_queue_t *q)
1464{
1465 mddev_t *mddev = q->queuedata;
1466 raid5_conf_t *conf = mddev_to_conf(mddev);
1467 unsigned long flags;
1468
1469 spin_lock_irqsave(&conf->device_lock, flags);
1470
	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);
1476
1477 spin_unlock_irqrestore(&conf->device_lock, flags);
1478
1479 unplug_slaves(mddev);
1480}
1481
1482static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1483 sector_t *error_sector)
1484{
1485 mddev_t *mddev = q->queuedata;
1486 raid5_conf_t *conf = mddev_to_conf(mddev);
1487 int i, ret = 0;
1488
1489 rcu_read_lock();
1490 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
1491 mdk_rdev_t *rdev = conf->disks[i].rdev;
1492 if (rdev && !rdev->faulty) {
1493 struct block_device *bdev = rdev->bdev;
1494 request_queue_t *r_queue = bdev_get_queue(bdev);
1495
1496 if (!r_queue->issue_flush_fn)
1497 ret = -EOPNOTSUPP;
1498 else {
1499 atomic_inc(&rdev->nr_pending);
1500 rcu_read_unlock();
1501 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1502 error_sector);
1503 rdev_dec_pending(rdev, mddev);
1504 rcu_read_lock();
1505 }
1506 }
1507 }
1508 rcu_read_unlock();
1509 return ret;
1510}
1511
1512static inline void raid5_plug_device(raid5_conf_t *conf)
1513{
1514 spin_lock_irq(&conf->device_lock);
1515 blk_plug_device(conf->mddev->queue);
1516 spin_unlock_irq(&conf->device_lock);
1517}
1518
1519static int make_request (request_queue_t *q, struct bio * bi)
1520{
1521 mddev_t *mddev = q->queuedata;
1522 raid5_conf_t *conf = mddev_to_conf(mddev);
1523 const unsigned int raid_disks = conf->raid_disks;
1524 const unsigned int data_disks = raid_disks - 1;
1525 unsigned int dd_idx, pd_idx;
1526 sector_t new_sector;
1527 sector_t logical_sector, last_sector;
1528 struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
1532 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1533 return 0;
1534 }
1535
	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1542 last_sector = bi->bi_sector + (bi->bi_size>>9);
1543 bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1547 DEFINE_WAIT(w);
1548
1549 new_sector = raid5_compute_sector(logical_sector,
1550 raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1551
1552 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1553 (unsigned long long)new_sector,
1554 (unsigned long long)logical_sector);
1555
1556 retry:
1557 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1558 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1559 if (sh) {
1560 if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1561 /* Add failed due to overlap. Flush everything
1562 * and wait a while
1563 */
1564 raid5_unplug_device(mddev->queue);
1565 release_stripe(sh);
1566 schedule();
1567 goto retry;
1568 }
1569 finish_wait(&conf->wait_for_overlap, &w);
1570 raid5_plug_device(conf);
1571 handle_stripe(sh);
1572 release_stripe(sh);
1573
1574 } else {
1575 /* cannot get stripe for read-ahead, just give-up */
1576 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1577 finish_wait(&conf->wait_for_overlap, &w);
1578 break;
1579 }
1580
1581 }
1582 spin_lock_irq(&conf->device_lock);
1583 if (--bi->bi_phys_segments == 0) {
1584 int bytes = bi->bi_size;
1585
1586 if ( bio_data_dir(bi) == WRITE )
1587 md_write_end(mddev);
1588 bi->bi_size = 0;
1589 bi->bi_end_io(bi, bytes, 0);
1590 }
1591 spin_unlock_irq(&conf->device_lock);
1592 return 0;
1593}
1594
1595/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
1598 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1599 struct stripe_head *sh;
1600 int sectors_per_chunk = conf->chunk_size >> 9;
1601 sector_t x;
1602 unsigned long stripe;
1603 int chunk_offset;
1604 int dd_idx, pd_idx;
1605 sector_t first_sector;
1606 int raid_disks = conf->raid_disks;
1607 int data_disks = raid_disks-1;
	sector_t max_sector = mddev->size << 1;
	int sync_blocks;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
1616 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1617 &sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
1623 }
1624 /* if there is 1 or more failed drives and we are trying
1625 * to resync, then assert that we are finished, because there is
1626 * nothing we can do.
1627 */
1628 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1634 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1635 /* we can skip this block, and probably more */
1636 sync_blocks /= STRIPE_SECTORS;
1637 *skipped = 1;
1638 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	x = sector_nr;
1642 chunk_offset = sector_div(x, sectors_per_chunk);
1643 stripe = x;
1644 BUG_ON(x != stripe);
1645
1646 first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
1647 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1648 sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1649 if (sh == NULL) {
1650 sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
1651 /* make sure we don't swamp the stripe cache if someone else
1652 * is trying to get access
1653 */
		schedule_timeout_uninterruptible(1);
	}
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
	spin_lock(&sh->lock);
1658 set_bit(STRIPE_SYNCING, &sh->state);
1659 clear_bit(STRIPE_INSYNC, &sh->state);
1660 spin_unlock(&sh->lock);
1661
1662 handle_stripe(sh);
1663 release_stripe(sh);
1664
1665 return STRIPE_SECTORS;
1666}
1667
1668/*
1669 * This is our raid5 kernel thread.
1670 *
1671 * We scan the hash table for stripes which can be handled now.
1672 * During the scan, completed stripes are saved for us by the interrupt
1673 * handler, so that they will not have to wait for our next wakeup.
1674 */
1675static void raid5d (mddev_t *mddev)
1676{
1677 struct stripe_head *sh;
1678 raid5_conf_t *conf = mddev_to_conf(mddev);
1679 int handled;
1680
1681 PRINTK("+++ raid5d active\n");
1682
	md_check_recovery(mddev);

	handled = 0;
1686 spin_lock_irq(&conf->device_lock);
1687 while (1) {
1688 struct list_head *first;
1689
		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			bitmap_unplug(mddev->bitmap);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		if (list_empty(&conf->handle_list) &&
1698 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
1699 !blk_queue_plugged(mddev->queue) &&
1700 !list_empty(&conf->delayed_list))
1701 raid5_activate_delayed(conf);
1702
1703 if (list_empty(&conf->handle_list))
1704 break;
1705
1706 first = conf->handle_list.next;
1707 sh = list_entry(first, struct stripe_head, lru);
1708
1709 list_del_init(first);
1710 atomic_inc(&sh->count);
1711 if (atomic_read(&sh->count)!= 1)
1712 BUG();
1713 spin_unlock_irq(&conf->device_lock);
1714
1715 handled++;
1716 handle_stripe(sh);
1717 release_stripe(sh);
1718
1719 spin_lock_irq(&conf->device_lock);
1720 }
1721 PRINTK("%d stripes handled\n", handled);
1722
1723 spin_unlock_irq(&conf->device_lock);
1724
1725 unplug_slaves(mddev);
1726
1727 PRINTK("--- raid5d inactive\n");
1728}
1729
struct raid5_sysfs_entry {
1731 struct attribute attr;
1732 ssize_t (*show)(raid5_conf_t *, char *);
1733 ssize_t (*store)(raid5_conf_t *, const char *, ssize_t);
1734};
1735
1736static ssize_t
1737raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page)
1738{
1739 return sprintf(page, "%d\n", conf->max_nr_stripes);
1740}
1741
1742static ssize_t
1743raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len)
1744{
1745 char *end;
1746 int new;
1747 if (len >= PAGE_SIZE)
1748 return -EINVAL;
1749
1750 new = simple_strtoul(page, &end, 10);
1751 if (!*page || (*end && *end != '\n') )
1752 return -EINVAL;
1753 if (new <= 16 || new > 32768)
1754 return -EINVAL;
1755 while (new < conf->max_nr_stripes) {
1756 if (drop_one_stripe(conf))
1757 conf->max_nr_stripes--;
1758 else
1759 break;
1760 }
1761 while (new > conf->max_nr_stripes) {
1762 if (grow_one_stripe(conf))
1763 conf->max_nr_stripes++;
1764 else break;
1765 }
1766 return len;
1767}
1768static struct raid5_sysfs_entry raid5_stripecache_size = {
1769 .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR },
1770 .show = raid5_show_stripe_cache_size,
1771 .store = raid5_store_stripe_cache_size,
1772};
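/*
 * Usage sketch (illustrative; the exact sysfs path depends on how the md
 * kobject is registered, typically under /sys/block/mdX/md/):
 *
 *   # cat /sys/block/md0/md/stripe_cache_size
 *   256
 *   # echo 512 > /sys/block/md0/md/stripe_cache_size
 *
 * The store method above rejects values of 16 or less and values above
 * 32768, and grows or shrinks the cache one stripe_head at a time.
 */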
1773
1774static ssize_t
1775raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page)
1776{
1777 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1778}
1779
1780static struct raid5_sysfs_entry raid5_stripecache_active = {
1781 .attr = {.name = "stripe_cache_active", .mode = S_IRUGO},
1782 .show = raid5_show_stripe_cache_active,
1783};
1784
1785static struct attribute *raid5_default_attrs[] = {
1786 &raid5_stripecache_size.attr,
1787 &raid5_stripecache_active.attr,
1788 NULL,
1789};
1790
1791static ssize_t
1792raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1793{
1794 struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
1795 raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
1796
1797 if (!entry->show)
1798 return -EIO;
1799 return entry->show(conf, page);
1800}
1801
1802static ssize_t
1803raid5_attr_store(struct kobject *kobj, struct attribute *attr,
1804 const char *page, size_t length)
1805{
1806 struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
1807 raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
1808
1809 if (!entry->store)
1810 return -EIO;
1811 return entry->store(conf, page, length);
1812}
1813
1814static void raid5_free(struct kobject *ko)
1815{
1816 raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj);
1817 kfree(conf);
1818}
1819
1820
1821static struct sysfs_ops raid5_sysfs_ops = {
1822 .show = raid5_attr_show,
1823 .store = raid5_attr_store,
1824};
1825
1826static struct kobj_type raid5_ktype = {
1827 .release = raid5_free,
1828 .sysfs_ops = &raid5_sysfs_ops,
1829 .default_attrs = raid5_default_attrs,
1830};
1831
NeilBrown72626682005-09-09 16:23:54 -07001832static int run(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833{
1834 raid5_conf_t *conf;
1835 int raid_disk, memory;
1836 mdk_rdev_t *rdev;
1837 struct disk_info *disk;
1838 struct list_head *tmp;
1839
1840 if (mddev->level != 5 && mddev->level != 4) {
1841 printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
1842 return -EIO;
1843 }
1844
1845 mddev->private = kmalloc (sizeof (raid5_conf_t)
1846 + mddev->raid_disks * sizeof(struct disk_info),
1847 GFP_KERNEL);
1848 if ((conf = mddev->private) == NULL)
1849 goto abort;
1850 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
1851 conf->mddev = mddev;
1852
1853 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
1854 goto abort;
1855 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1856
1857 spin_lock_init(&conf->device_lock);
1858 init_waitqueue_head(&conf->wait_for_stripe);
1859 init_waitqueue_head(&conf->wait_for_overlap);
1860 INIT_LIST_HEAD(&conf->handle_list);
1861 INIT_LIST_HEAD(&conf->delayed_list);
NeilBrown72626682005-09-09 16:23:54 -07001862 INIT_LIST_HEAD(&conf->bitmap_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 INIT_LIST_HEAD(&conf->inactive_list);
1864 atomic_set(&conf->active_stripes, 0);
1865 atomic_set(&conf->preread_active_stripes, 0);
1866
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
1868
1869 ITERATE_RDEV(mddev,rdev,tmp) {
1870 raid_disk = rdev->raid_disk;
1871 if (raid_disk >= mddev->raid_disks
1872 || raid_disk < 0)
1873 continue;
1874 disk = conf->disks + raid_disk;
1875
1876 disk->rdev = rdev;
1877
1878 if (rdev->in_sync) {
1879 char b[BDEVNAME_SIZE];
1880 printk(KERN_INFO "raid5: device %s operational as raid"
1881 " disk %d\n", bdevname(rdev->bdev,b),
1882 raid_disk);
1883 conf->working_disks++;
1884 }
1885 }
1886
1887 conf->raid_disks = mddev->raid_disks;
1888 /*
1889 * 0 for a fully functional array, 1 for a degraded array.
1890 */
1891 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
1892 conf->mddev = mddev;
1893 conf->chunk_size = mddev->chunk_size;
1894 conf->level = mddev->level;
1895 conf->algorithm = mddev->layout;
1896 conf->max_nr_stripes = NR_STRIPES;
1897
1898 /* device size must be a multiple of chunk size */
1899 mddev->size &= ~(mddev->chunk_size/1024 -1);
NeilBrownb1581562005-07-31 22:34:50 -07001900 mddev->resync_max_sectors = mddev->size << 1;
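	/*
	 * Worked example: with a 64KiB chunk, chunk_size/1024 is 64, so the
	 * per-device size (kept in KiB) is rounded down to a multiple of
	 * 64KiB, and resync_max_sectors is that same size expressed in
	 * 512-byte sectors (size << 1).
	 */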
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902 if (!conf->chunk_size || conf->chunk_size % 4) {
1903 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
1904 conf->chunk_size, mdname(mddev));
1905 goto abort;
1906 }
1907 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
1908 printk(KERN_ERR
1909 "raid5: unsupported parity algorithm %d for %s\n",
1910 conf->algorithm, mdname(mddev));
1911 goto abort;
1912 }
1913 if (mddev->degraded > 1) {
1914 printk(KERN_ERR "raid5: not enough operational devices for %s"
1915 " (%d/%d failed)\n",
1916 mdname(mddev), conf->failed_disks, conf->raid_disks);
1917 goto abort;
1918 }
1919
1920 if (mddev->degraded == 1 &&
1921 mddev->recovery_cp != MaxSector) {
1922 printk(KERN_ERR
1923 "raid5: cannot start dirty degraded array for %s\n",
1924 mdname(mddev));
1925 goto abort;
1926 }
1927
1928 {
1929 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
1930 if (!mddev->thread) {
1931 printk(KERN_ERR
1932 "raid5: couldn't allocate thread for %s\n",
1933 mdname(mddev));
1934 goto abort;
1935 }
1936 }
1937	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1938 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
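	/*
	 * Rough example, assuming 4KiB pages: with the default 256 stripes
	 * and a 4-device array this is a little over 256 * 4 * 4KiB = 4MiB,
	 * plus the stripe_head and bio structures counted above.
	 */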
1939 if (grow_stripes(conf, conf->max_nr_stripes)) {
1940 printk(KERN_ERR
1941 "raid5: couldn't allocate %dkB for buffers\n", memory);
1942 shrink_stripes(conf);
1943 md_unregister_thread(mddev->thread);
1944 goto abort;
1945 } else
1946 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
1947 memory, mdname(mddev));
1948
1949 if (mddev->degraded == 0)
1950 printk("raid5: raid level %d set %s active with %d out of %d"
1951 " devices, algorithm %d\n", conf->level, mdname(mddev),
1952 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
1953 conf->algorithm);
1954 else
1955 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
1956 " out of %d devices, algorithm %d\n", conf->level,
1957 mdname(mddev), mddev->raid_disks - mddev->degraded,
1958 mddev->raid_disks, conf->algorithm);
1959
1960 print_raid5_conf(conf);
1961
1962 /* read-ahead size must cover two whole stripes, which is
1963 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
1964 */
1965 {
1966 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1967 / PAGE_CACHE_SIZE;
1968 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1969 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1970 }
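	/*
	 * For example, assuming 4KiB pages: a 4-device array with a 64KiB
	 * chunk gives stripe = 3 * 64KiB / 4KiB = 48 pages, so ra_pages is
	 * raised to at least 96 pages (384KiB), i.e. two full stripes.
	 */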
1971
1972 /* Ok, everything is just fine now */
NeilBrown3f294f42005-11-08 21:39:25 -08001973 conf->kobj.parent = kobject_get(&mddev->kobj);
1974 strcpy(conf->kobj.name, "raid5");
1975 conf->kobj.ktype = &raid5_ktype;
1976 kobject_register(&conf->kobj);
NeilBrown7a5febe2005-05-16 21:53:16 -07001977
NeilBrown72626682005-09-09 16:23:54 -07001978 if (mddev->bitmap)
1979 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1980
NeilBrown7a5febe2005-05-16 21:53:16 -07001981 mddev->queue->unplug_fn = raid5_unplug_device;
1982 mddev->queue->issue_flush_fn = raid5_issue_flush;
1983
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 mddev->array_size = mddev->size * (mddev->raid_disks - 1);
1985 return 0;
1986abort:
1987 if (conf) {
1988 print_raid5_conf(conf);
1989 if (conf->stripe_hashtbl)
1990 free_pages((unsigned long) conf->stripe_hashtbl,
1991 HASH_PAGES_ORDER);
1992 kfree(conf);
1993 }
1994 mddev->private = NULL;
1995 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
1996 return -EIO;
1997}
1998
1999
2000
NeilBrown3f294f42005-11-08 21:39:25 -08002001static int stop(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002{
2003 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2004
2005 md_unregister_thread(mddev->thread);
2006 mddev->thread = NULL;
2007 shrink_stripes(conf);
2008 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
2009 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
NeilBrown3f294f42005-11-08 21:39:25 -08002010 kobject_unregister(&conf->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 mddev->private = NULL;
2012 return 0;
2013}
2014
2015#if RAID5_DEBUG
2016static void print_sh (struct stripe_head *sh)
2017{
2018 int i;
2019
2020 printk("sh %llu, pd_idx %d, state %ld.\n",
2021 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2022 printk("sh %llu, count %d.\n",
2023 (unsigned long long)sh->sector, atomic_read(&sh->count));
2024 printk("sh %llu, ", (unsigned long long)sh->sector);
2025 for (i = 0; i < sh->raid_conf->raid_disks; i++) {
2026 printk("(cache%d: %p %ld) ",
2027 i, sh->dev[i].page, sh->dev[i].flags);
2028 }
2029 printk("\n");
2030}
2031
2032static void printall (raid5_conf_t *conf)
2033{
2034 struct stripe_head *sh;
2035 int i;
2036
2037 spin_lock_irq(&conf->device_lock);
2038 for (i = 0; i < NR_HASH; i++) {
2039 sh = conf->stripe_hashtbl[i];
2040 for (; sh; sh = sh->hash_next) {
2041 if (sh->raid_conf != conf)
2042 continue;
2043 print_sh(sh);
2044 }
2045 }
2046 spin_unlock_irq(&conf->device_lock);
2047}
2048#endif
2049
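/*
 * Contributes the raid5-specific part of the /proc/mdstat output; for a
 * healthy 4-device array with 64KiB chunks and layout 2 this comes out
 * roughly as (illustrative only):
 *
 *	level 5, 64k chunk, algorithm 2 [4/4] [UUUU]
 */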
2050static void status (struct seq_file *seq, mddev_t *mddev)
2051{
2052 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2053 int i;
2054
2055 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2056 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2057 for (i = 0; i < conf->raid_disks; i++)
2058 seq_printf (seq, "%s",
2059 conf->disks[i].rdev &&
2060 conf->disks[i].rdev->in_sync ? "U" : "_");
2061 seq_printf (seq, "]");
2062#if RAID5_DEBUG
2063#define D(x) \
2064 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2065 printall(conf);
2066#endif
2067}
2068
2069static void print_raid5_conf (raid5_conf_t *conf)
2070{
2071 int i;
2072 struct disk_info *tmp;
2073
2074 printk("RAID5 conf printout:\n");
2075 if (!conf) {
2076 printk("(conf==NULL)\n");
2077 return;
2078 }
2079 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2080 conf->working_disks, conf->failed_disks);
2081
2082 for (i = 0; i < conf->raid_disks; i++) {
2083 char b[BDEVNAME_SIZE];
2084 tmp = conf->disks + i;
2085 if (tmp->rdev)
2086 printk(" disk %d, o:%d, dev:%s\n",
2087 i, !tmp->rdev->faulty,
2088 bdevname(tmp->rdev->bdev,b));
2089 }
2090}
2091
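/*
 * spare_active hook: called by the md core once a recovery pass has
 * finished.  Any device that is present, not faulty and not yet marked
 * in_sync has just been rebuilt, so flip it to in_sync and update the
 * degraded/failed/working counts to match.
 */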
2092static int raid5_spare_active(mddev_t *mddev)
2093{
2094 int i;
2095 raid5_conf_t *conf = mddev->private;
2096 struct disk_info *tmp;
2097
2098 for (i = 0; i < conf->raid_disks; i++) {
2099 tmp = conf->disks + i;
2100 if (tmp->rdev
2101 && !tmp->rdev->faulty
2102 && !tmp->rdev->in_sync) {
2103 mddev->degraded--;
2104 conf->failed_disks--;
2105 conf->working_disks++;
2106 tmp->rdev->in_sync = 1;
2107 }
2108 }
2109 print_raid5_conf(conf);
2110 return 0;
2111}
2112
2113static int raid5_remove_disk(mddev_t *mddev, int number)
2114{
2115 raid5_conf_t *conf = mddev->private;
2116 int err = 0;
2117 mdk_rdev_t *rdev;
2118 struct disk_info *p = conf->disks + number;
2119
2120 print_raid5_conf(conf);
2121 rdev = p->rdev;
2122 if (rdev) {
2123 if (rdev->in_sync ||
2124 atomic_read(&rdev->nr_pending)) {
2125 err = -EBUSY;
2126 goto abort;
2127 }
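		/*
		 * Clear the pointer, then wait an RCU grace period so any
		 * reader that picked up the old rdev pointer has finished
		 * (taking a nr_pending reference if it needed one); if a
		 * reference is still held after that, back out and report
		 * busy.
		 */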
2128 p->rdev = NULL;
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07002129 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 if (atomic_read(&rdev->nr_pending)) {
2131 /* lost the race, try later */
2132 err = -EBUSY;
2133 p->rdev = rdev;
2134 }
2135 }
2136abort:
2137
2138 print_raid5_conf(conf);
2139 return err;
2140}
2141
2142static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2143{
2144 raid5_conf_t *conf = mddev->private;
2145 int found = 0;
2146 int disk;
2147 struct disk_info *p;
2148
2149 if (mddev->degraded > 1)
2150 /* no point adding a device */
2151 return 0;
2152
2153 /*
2154 * find the disk ...
2155 */
2156 for (disk=0; disk < mddev->raid_disks; disk++)
2157 if ((p=conf->disks + disk)->rdev == NULL) {
2158 rdev->in_sync = 0;
2159 rdev->raid_disk = disk;
2160 found = 1;
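			/*
			 * If the device is not going back into the slot it
			 * last occupied, the bitmap cannot be trusted for it,
			 * so force a full resync.
			 */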
NeilBrown72626682005-09-09 16:23:54 -07002161 if (rdev->saved_raid_disk != disk)
2162 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 p->rdev = rdev;
2164 break;
2165 }
2166 print_raid5_conf(conf);
2167 return found;
2168}
2169
2170static int raid5_resize(mddev_t *mddev, sector_t sectors)
2171{
2172 /* no resync is happening, and there is enough space
2173 * on all devices, so we can resize.
2174 * We need to make sure resync covers any new space.
2175 * If the array is shrinking we should possibly wait until
2176 * any io in the removed space completes, but it hardly seems
2177 * worth it.
2178 */
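	/*
	 * Example, assuming a 64KiB chunk (128 sectors) and 4 member
	 * devices: the new per-device size is rounded down to a multiple of
	 * 128 sectors, the exported array size becomes sectors * 3 / 2 KiB
	 * (all devices minus one parity device's worth), and if the array
	 * grew, recovery_cp is set to the old end of device so that only
	 * the newly added space gets resynced.
	 */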
2179 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2180 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2181 set_capacity(mddev->gendisk, mddev->array_size << 1);
2182 mddev->changed = 1;
2183 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2184 mddev->recovery_cp = mddev->size << 1;
2185 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2186 }
2187 mddev->size = sectors /2;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07002188 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 return 0;
2190}
2191
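/*
 * quiesce(mddev, 1) marks the array quiesced and waits, under
 * device_lock, until no stripes are active, so the caller sees a moment
 * with no raid5 stripe work in flight; quiesce(mddev, 0) clears the flag
 * and wakes any waiters.  Both paths also refresh the md thread timeout
 * to match whether a bitmap is present.
 */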
NeilBrown72626682005-09-09 16:23:54 -07002192static void raid5_quiesce(mddev_t *mddev, int state)
2193{
2194 raid5_conf_t *conf = mddev_to_conf(mddev);
2195
2196 switch(state) {
2197 case 1: /* stop all writes */
2198 spin_lock_irq(&conf->device_lock);
2199 conf->quiesce = 1;
2200 wait_event_lock_irq(conf->wait_for_stripe,
2201 atomic_read(&conf->active_stripes) == 0,
2202 conf->device_lock, /* nothing */);
2203 spin_unlock_irq(&conf->device_lock);
2204 break;
2205
2206 case 0: /* re-enable writes */
2207 spin_lock_irq(&conf->device_lock);
2208 conf->quiesce = 0;
2209 wake_up(&conf->wait_for_stripe);
2210 spin_unlock_irq(&conf->device_lock);
2211 break;
2212 }
2213 if (mddev->thread) {
2214 if (mddev->bitmap)
2215 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2216 else
2217 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2218 md_wakeup_thread(mddev->thread);
2219 }
2220}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221static mdk_personality_t raid5_personality=
2222{
2223 .name = "raid5",
2224 .owner = THIS_MODULE,
2225 .make_request = make_request,
2226 .run = run,
2227 .stop = stop,
2228 .status = status,
2229 .error_handler = error,
2230 .hot_add_disk = raid5_add_disk,
2231 .hot_remove_disk= raid5_remove_disk,
2232 .spare_active = raid5_spare_active,
2233 .sync_request = sync_request,
2234 .resize = raid5_resize,
NeilBrown72626682005-09-09 16:23:54 -07002235 .quiesce = raid5_quiesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236};
2237
2238static int __init raid5_init (void)
2239{
2240 return register_md_personality (RAID5, &raid5_personality);
2241}
2242
2243static void raid5_exit (void)
2244{
2245 unregister_md_personality (RAID5);
2246}
2247
2248module_init(raid5_init);
2249module_exit(raid5_exit);
2250MODULE_LICENSE("GPL");
2251MODULE_ALIAS("md-personality-4"); /* RAID5 */